backend_cleanup_tracker.cc

// Copyright (c) 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Internal helper used to sequence cleanup and reuse of cache directories
// among different objects.

#include "net/disk_cache/backend_cleanup_tracker.h"

#include <unordered_map>
#include <utility>

#include "base/callback.h"
#include "base/files/file_path.h"
#include "base/lazy_instance.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#include "base/task/sequenced_task_runner.h"
#include "base/threading/sequenced_task_runner_handle.h"
namespace disk_cache {
namespace {

using TrackerMap = std::unordered_map<base::FilePath, BackendCleanupTracker*>;

struct AllBackendCleanupTrackers {
  TrackerMap map;

  // Since clients can potentially call CreateCacheBackend from multiple
  // threads, we need to lock the map keeping track of cleanup trackers
  // for these backends. Our overall strategy is to have TryCreate act as
  // an arbitrator: whichever thread grabs a tracker gets to operate on it
  // freely until it gets destroyed. (See the usage sketch after TryCreate
  // below.)
  base::Lock lock;
};

static base::LazyInstance<AllBackendCleanupTrackers>::Leaky g_all_trackers;

}  // namespace
// static
scoped_refptr<BackendCleanupTracker> BackendCleanupTracker::TryCreate(
    const base::FilePath& path,
    base::OnceClosure retry_closure) {
  AllBackendCleanupTrackers* all_trackers = g_all_trackers.Pointer();
  base::AutoLock lock(all_trackers->lock);
  // Insert a placeholder entry; |insert_result.second| tells us whether
  // |path| was previously unclaimed.
  std::pair<TrackerMap::iterator, bool> insert_result =
      all_trackers->map.insert(
          std::pair<base::FilePath, BackendCleanupTracker*>(path, nullptr));
  if (insert_result.second) {
    // We won the race: create the tracker and publish it in the map.
    auto tracker = base::WrapRefCounted(new BackendCleanupTracker(path));
    insert_result.first->second = tracker.get();
    return tracker;
  } else {
    // Someone else owns |path|; queue |retry_closure| to run once the
    // existing tracker is destroyed.
    insert_result.first->second->AddPostCleanupCallbackImpl(
        std::move(retry_closure));
    return nullptr;
  }
}
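
// Usage sketch (editorial, not part of the original file): a hypothetical
// backend claims the directory before touching it, and retries its own
// initialization once any in-progress cleanup of the same path finishes.
// HypotheticalBackend, Init(), and the weak-pointer wiring below are
// illustrative assumptions, not Chromium API:
//
//   void HypotheticalBackend::Init(const base::FilePath& path) {
//     cleanup_tracker_ = BackendCleanupTracker::TryCreate(
//         path, base::BindOnce(&HypotheticalBackend::Init,
//                              weak_factory_.GetWeakPtr(), path));
//     if (!cleanup_tracker_)
//       return;  // Init() will be re-posted after the old user cleans up.
//     // Safe to operate on |path| now; the tracker is released when the
//     // backend drops its scoped_refptr.
//   }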
void BackendCleanupTracker::AddPostCleanupCallback(base::OnceClosure cb) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(seq_checker_);
  // Despite the sequencing requirement we need to grab the table lock since
  // this may otherwise race against TryCreate.
  base::AutoLock lock(g_all_trackers.Get().lock);
  AddPostCleanupCallbackImpl(std::move(cb));
}
void BackendCleanupTracker::AddPostCleanupCallbackImpl(base::OnceClosure cb) {
  // Remember the closure along with the task runner of the sequence it was
  // added on, so the destructor can post it back to that same sequence.
  post_cleanup_cbs_.emplace_back(base::SequencedTaskRunnerHandle::Get(),
                                 std::move(cb));
}

BackendCleanupTracker::BackendCleanupTracker(const base::FilePath& path)
    : path_(path) {}
BackendCleanupTracker::~BackendCleanupTracker() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(seq_checker_);

  // Unregister ourselves from the table first...
  {
    AllBackendCleanupTrackers* all_trackers = g_all_trackers.Pointer();
    base::AutoLock lock(all_trackers->lock);
    int rv = all_trackers->map.erase(path_);
    DCHECK_EQ(1, rv);
  }

  // ...then post the queued retry callbacks, each to the sequence it was
  // registered from.
  while (!post_cleanup_cbs_.empty()) {
    post_cleanup_cbs_.back().first->PostTask(
        FROM_HERE, std::move(post_cleanup_cbs_.back().second));
    post_cleanup_cbs_.pop_back();
  }
}
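
// Ordering note (editorial): the map entry is erased before the callbacks
// are posted, so a retried TryCreate invoked from one of these callbacks
// will find the path unclaimed and succeed.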
}  // namespace disk_cache