mem_entry_impl.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/memory/mem_entry_impl.h"

#include <algorithm>
#include <memory>
#include <utility>

#include "base/bind.h"
#include "base/check_op.h"
#include "base/format_macros.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_math.h"
#include "base/strings/stringprintf.h"
#include "base/values.h"
#include "net/base/interval.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/memory/mem_backend_impl.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/log/net_log_event_type.h"
#include "net/log/net_log_source_type.h"

using base::Time;

namespace disk_cache {

namespace {

const int kSparseData = 1;

// The maximum size of a sparse entry's child is 2 to the power of this number.
const int kMaxChildEntryBits = 12;

// Sparse entry children have a maximum size of 4 KB.
const int kMaxChildEntrySize = 1 << kMaxChildEntryBits;

// Converts a global sparse offset to a child index.
int64_t ToChildIndex(int64_t offset) {
  return offset >> kMaxChildEntryBits;
}

// Converts a global sparse offset to an offset within the child entry.
int ToChildOffset(int64_t offset) {
  return static_cast<int>(offset & (kMaxChildEntrySize - 1));
}
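
// Worked example (illustrative): with kMaxChildEntryBits == 12,
// kMaxChildEntrySize == 4096, so a global sparse offset of 10000 maps to
// child index 2 (10000 >> 12) and child offset 1808 (10000 & 4095, i.e.
// 10000 - 2 * 4096).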

// Returns a name for a child entry given the base_name of the parent and the
// child_id. This name is only used for logging purposes.
// If the entry is called entry_name, child entries will be named something
// like Range_entry_name:YYY where YYY is the number of the particular child.
std::string GenerateChildName(const std::string& base_name, int64_t child_id) {
  return base::StringPrintf("Range_%s:%" PRId64, base_name.c_str(), child_id);
}
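
// For example (with a hypothetical key), a parent keyed "http://example.com/"
// would log its child with child_id 3 as "Range_http://example.com/:3".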

// Returns NetLog parameters for the creation of a MemEntryImpl. A separate
// function is needed because child entries don't store their key().
base::Value NetLogEntryCreationParams(const MemEntryImpl* entry) {
  base::Value::Dict dict;
  std::string key;
  switch (entry->type()) {
    case MemEntryImpl::EntryType::kParent:
      key = entry->key();
      break;
    case MemEntryImpl::EntryType::kChild:
      key = GenerateChildName(entry->parent()->key(), entry->child_id());
      break;
  }
  dict.Set("key", key);
  dict.Set("created", true);
  return base::Value(std::move(dict));
}

}  // namespace

MemEntryImpl::MemEntryImpl(base::WeakPtr<MemBackendImpl> backend,
                           const std::string& key,
                           net::NetLog* net_log)
    : MemEntryImpl(backend,
                   key,
                   0,        // child_id
                   nullptr,  // parent
                   net_log) {
  Open();
  // Just creating the entry (without any data) could cause the storage to
  // grow beyond capacity, but we allow such infractions.
  backend_->ModifyStorageSize(GetStorageSize());
}

MemEntryImpl::MemEntryImpl(base::WeakPtr<MemBackendImpl> backend,
                           int64_t child_id,
                           MemEntryImpl* parent,
                           net::NetLog* net_log)
    : MemEntryImpl(backend,
                   std::string(),  // key
                   child_id,
                   parent,
                   net_log) {
  (*parent_->children_)[child_id] = this;
}

void MemEntryImpl::Open() {
  // Only a parent entry can be opened.
  DCHECK_EQ(EntryType::kParent, type());
  CHECK_NE(ref_count_, std::numeric_limits<uint32_t>::max());
  ++ref_count_;
  DCHECK(!doomed_);
}

bool MemEntryImpl::InUse() const {
  if (type() == EntryType::kChild)
    return parent_->InUse();
  return ref_count_ > 0;
}

int MemEntryImpl::GetStorageSize() const {
  int storage_size = static_cast<int32_t>(key_.size());
  for (const auto& i : data_)
    storage_size += i.size();
  return storage_size;
}

void MemEntryImpl::UpdateStateOnUse(EntryModified modified_enum) {
  if (!doomed_ && backend_)
    backend_->OnEntryUpdated(this);
  last_used_ = MemBackendImpl::Now(backend_);
  if (modified_enum == ENTRY_WAS_MODIFIED)
    last_modified_ = last_used_;
}

void MemEntryImpl::Doom() {
  if (!doomed_) {
    doomed_ = true;
    if (backend_)
      backend_->OnEntryDoomed(this);
    net_log_.AddEvent(net::NetLogEventType::ENTRY_DOOM);
  }
  if (!ref_count_)
    delete this;
}

void MemEntryImpl::Close() {
  DCHECK_EQ(EntryType::kParent, type());
  CHECK_GT(ref_count_, 0u);
  --ref_count_;
  if (ref_count_ == 0 && !doomed_) {
    // At this point the user is clearly done writing, so make sure there isn't
    // any wastage due to the exponential growth of the vectors backing the
    // data streams.
    Compact();
    if (children_) {
      for (const auto& child_info : *children_) {
        if (child_info.second != this)
          child_info.second->Compact();
      }
    }
  }
  if (!ref_count_ && doomed_)
    delete this;
}

std::string MemEntryImpl::GetKey() const {
  // A child entry doesn't have a key, so this method should not be called.
  DCHECK_EQ(EntryType::kParent, type());
  return key_;
}

Time MemEntryImpl::GetLastUsed() const {
  return last_used_;
}

Time MemEntryImpl::GetLastModified() const {
  return last_modified_;
}

int32_t MemEntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;
  return data_[index].size();
}

int MemEntryImpl::ReadData(int index,
                           int offset,
                           IOBuffer* buf,
                           int buf_len,
                           CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        false);
  }
  int result = InternalReadData(index, offset, buf, buf_len);
  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}

int MemEntryImpl::WriteData(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            CompletionOnceCallback callback,
                            bool truncate) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        truncate);
  }
  int result = InternalWriteData(index, offset, buf, buf_len, truncate);
  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}

int MemEntryImpl::ReadSparseData(int64_t offset,
                                 IOBuffer* buf,
                                 int buf_len,
                                 CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_READ,
                          net::NetLogEventPhase::BEGIN, offset, buf_len);
  }
  int result = InternalReadSparseData(offset, buf, buf_len);
  if (net_log_.IsCapturing())
    net_log_.EndEvent(net::NetLogEventType::SPARSE_READ);
  return result;
}

int MemEntryImpl::WriteSparseData(int64_t offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_WRITE,
                          net::NetLogEventPhase::BEGIN, offset, buf_len);
  }
  int result = InternalWriteSparseData(offset, buf, buf_len);
  if (net_log_.IsCapturing())
    net_log_.EndEvent(net::NetLogEventType::SPARSE_WRITE);
  return result;
}

RangeResult MemEntryImpl::GetAvailableRange(int64_t offset,
                                            int len,
                                            RangeResultCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(net_log_, net::NetLogEventType::SPARSE_GET_RANGE,
                          net::NetLogEventPhase::BEGIN, offset, len);
  }
  RangeResult result = InternalGetAvailableRange(offset, len);
  if (net_log_.IsCapturing()) {
    net_log_.EndEvent(net::NetLogEventType::SPARSE_GET_RANGE, [&] {
      return CreateNetLogGetAvailableRangeResultParams(result);
    });
  }
  return result;
}

bool MemEntryImpl::CouldBeSparse() const {
  DCHECK_EQ(EntryType::kParent, type());
  return (children_.get() != nullptr);
}

net::Error MemEntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) {
  return net::OK;
}

void MemEntryImpl::SetLastUsedTimeForTest(base::Time time) {
  last_used_ = time;
}

// ------------------------------------------------------------------------

MemEntryImpl::MemEntryImpl(base::WeakPtr<MemBackendImpl> backend,
                           const std::string& key,
                           int64_t child_id,
                           MemEntryImpl* parent,
                           net::NetLog* net_log)
    : key_(key),
      child_id_(child_id),
      parent_(parent),
      last_modified_(MemBackendImpl::Now(backend)),
      last_used_(last_modified_),
      backend_(backend) {
  backend_->OnEntryInserted(this);
  net_log_ = net::NetLogWithSource::Make(
      net_log, net::NetLogSourceType::MEMORY_CACHE_ENTRY);
  net_log_.BeginEvent(net::NetLogEventType::DISK_CACHE_MEM_ENTRY_IMPL,
                      [&] { return NetLogEntryCreationParams(this); });
}

MemEntryImpl::~MemEntryImpl() {
  if (backend_)
    backend_->ModifyStorageSize(-GetStorageSize());

  if (type() == EntryType::kParent) {
    if (children_) {
      EntryMap children;
      children_->swap(children);

      for (auto& it : children) {
        // Since |this| is stored in the map, it should be guarded against
        // double dooming, which would result in double destruction.
        if (it.second != this)
          it.second->Doom();
      }
    }
  } else {
    parent_->children_->erase(child_id_);
  }
  net_log_.EndEvent(net::NetLogEventType::DISK_CACHE_MEM_ENTRY_IMPL);
}

int MemEntryImpl::InternalReadData(int index, int offset, IOBuffer* buf,
                                   int buf_len) {
  DCHECK(type() == EntryType::kParent || index == kSparseData);

  if (index < 0 || index >= kNumStreams || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = data_[index].size();
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  // Clamp the read to the end of the stream, guarding against overflow of
  // |offset + buf_len|.
  int end_offset;
  if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > entry_size)
    buf_len = entry_size - offset;

  UpdateStateOnUse(ENTRY_WAS_NOT_MODIFIED);
  std::copy(data_[index].begin() + offset,
            data_[index].begin() + offset + buf_len, buf->data());
  return buf_len;
}
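
// Read clamping example (illustrative): with a 120-byte stream, a read at
// offset 100 with buf_len 50 is clamped to 20 bytes; a read at offset 120 or
// beyond returns 0.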

int MemEntryImpl::InternalWriteData(int index, int offset, IOBuffer* buf,
                                    int buf_len, bool truncate) {
  DCHECK(type() == EntryType::kParent || index == kSparseData);
  if (!backend_)
    return net::ERR_INSUFFICIENT_RESOURCES;

  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  int max_file_size = backend_->MaxFileSize();

  int end_offset;
  if (offset > max_file_size || buf_len > max_file_size ||
      !base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > max_file_size) {
    return net::ERR_FAILED;
  }

  int old_data_size = data_[index].size();
  if (truncate || old_data_size < end_offset) {
    int delta = end_offset - old_data_size;
    backend_->ModifyStorageSize(delta);
    if (backend_->HasExceededStorageSize()) {
      backend_->ModifyStorageSize(-delta);
      return net::ERR_INSUFFICIENT_RESOURCES;
    }

    data_[index].resize(end_offset);

    // Zero fill any hole.
    if (old_data_size < offset) {
      std::fill(data_[index].begin() + old_data_size,
                data_[index].begin() + offset, 0);
    }
  }

  UpdateStateOnUse(ENTRY_WAS_MODIFIED);

  if (!buf_len)
    return 0;

  std::copy(buf->data(), buf->data() + buf_len, data_[index].begin() + offset);
  return buf_len;
}
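
// Hole-filling example (illustrative): if a stream currently holds 10 bytes
// and a write arrives at offset 20, bytes [10, 20) are zero filled before the
// new data is copied into [20, 20 + buf_len).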

int MemEntryImpl::InternalReadSparseData(int64_t offset,
                                         IOBuffer* buf,
                                         int buf_len) {
  DCHECK_EQ(EntryType::kParent, type());

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // Ensure that offset + buf_len does not overflow. This ensures that
  // offset + io_buf->BytesConsumed() never overflows below.
  // The result of std::min is guaranteed to fit into int since buf_len did.
  buf_len = std::min(static_cast<int64_t>(buf_len),
                     std::numeric_limits<int64_t>::max() - offset);

  // We will keep using this buffer and adjust the offset in this buffer.
  scoped_refptr<net::DrainableIOBuffer> io_buf =
      base::MakeRefCounted<net::DrainableIOBuffer>(buf, buf_len);

  // Iterate until we have read enough.
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = GetChild(offset + io_buf->BytesConsumed(), false);

    // No child present for that offset.
    if (!child)
      break;

    // We then need to prepare the child offset and len.
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // If we are trying to read from a position for which the child entry has
    // no data, we should stop.
    if (child_offset < child->child_first_pos_)
      break;
    if (net_log_.IsCapturing()) {
      NetLogSparseReadWrite(net_log_,
                            net::NetLogEventType::SPARSE_READ_CHILD_DATA,
                            net::NetLogEventPhase::BEGIN,
                            child->net_log_.source(), io_buf->BytesRemaining());
    }
    int ret =
        child->ReadData(kSparseData, child_offset, io_buf.get(),
                        io_buf->BytesRemaining(), CompletionOnceCallback());
    if (net_log_.IsCapturing()) {
      net_log_.EndEventWithNetErrorCode(
          net::NetLogEventType::SPARSE_READ_CHILD_DATA, ret);
    }

    // If we encounter an error in one entry, return immediately.
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Increment the counter by the number of bytes read from the child entry.
    io_buf->DidConsume(ret);
  }
  UpdateStateOnUse(ENTRY_WAS_NOT_MODIFIED);
  return io_buf->BytesConsumed();
}
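
// Sparse read example (illustrative): a 6000-byte read at offset 3000 reads
// bytes [3000, 4096) from child 0, then [4096, 8192) from child 1, and
// finally [8192, 9000) from child 2, stopping early if any child is missing
// or has a hole at the requested child offset.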

int MemEntryImpl::InternalWriteSparseData(int64_t offset,
                                          IOBuffer* buf,
                                          int buf_len) {
  DCHECK_EQ(EntryType::kParent, type());

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  // We can't generally do this without the backend since we need it to create
  // child entries.
  if (!backend_)
    return net::ERR_FAILED;

  // Check that offset + buf_len does not overflow. This ensures that
  // offset + io_buf->BytesConsumed() never overflows below.
  if (offset < 0 || buf_len < 0 || !base::CheckAdd(offset, buf_len).IsValid())
    return net::ERR_INVALID_ARGUMENT;

  scoped_refptr<net::DrainableIOBuffer> io_buf =
      base::MakeRefCounted<net::DrainableIOBuffer>(buf, buf_len);

  // This loop walks through child entries continuously starting from |offset|
  // and writes blocks of data (of maximum size kMaxChildEntrySize) into each
  // child entry until all |buf_len| bytes are written. The write operation can
  // start in the middle of an entry.
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = GetChild(offset + io_buf->BytesConsumed(), true);
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // Find the right amount to write: the minimum of the remaining bytes to
    // write and the remaining capacity of this child entry.
    int write_len =
        std::min(io_buf->BytesRemaining(), kMaxChildEntrySize - child_offset);

    // Keep a record of the last byte position (exclusive) in the child.
    int data_size = child->GetDataSize(kSparseData);

    if (net_log_.IsCapturing()) {
      NetLogSparseReadWrite(
          net_log_, net::NetLogEventType::SPARSE_WRITE_CHILD_DATA,
          net::NetLogEventPhase::BEGIN, child->net_log_.source(), write_len);
    }

    // Always writes to the child entry. This operation may overwrite data
    // previously written.
    // TODO(hclam): if there is data in the entry and this write is not
    // continuous we may want to discard this write.
    int ret = child->WriteData(kSparseData, child_offset, io_buf.get(),
                               write_len, CompletionOnceCallback(), true);
    if (net_log_.IsCapturing()) {
      net_log_.EndEventWithNetErrorCode(
          net::NetLogEventType::SPARSE_WRITE_CHILD_DATA, ret);
    }
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Keep a record of the first byte position in the child if the write was
    // neither aligned nor continuous. This is to enable writing to the middle
    // of an entry and still keep track of data off the aligned edge.
    if (data_size != child_offset)
      child->child_first_pos_ = child_offset;

    // Adjust the offset in the IO buffer.
    io_buf->DidConsume(ret);
  }
  UpdateStateOnUse(ENTRY_WAS_MODIFIED);
  return io_buf->BytesConsumed();
}
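
// Sparse write example (illustrative): a 6000-byte write at offset 3000 is
// split into 1096 bytes at offset 3000 of child 0 (min(6000, 4096 - 3000)),
// 4096 bytes filling all of child 1, and 808 bytes at offset 0 of child 2.
// If child 0 was previously empty, its child_first_pos_ becomes 3000,
// recording that the child's data starts mid-block.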

RangeResult MemEntryImpl::InternalGetAvailableRange(int64_t offset, int len) {
  DCHECK_EQ(EntryType::kParent, type());

  if (!InitSparseInfo())
    return RangeResult(net::ERR_CACHE_OPERATION_NOT_SUPPORTED);

  if (offset < 0 || len < 0)
    return RangeResult(net::ERR_INVALID_ARGUMENT);

  // Truncate |len| to make sure that |offset + len| does not overflow.
  // This is OK since one can't write that far anyway.
  // The result of std::min is guaranteed to fit into int since |len| did.
  len = std::min(static_cast<int64_t>(len),
                 std::numeric_limits<int64_t>::max() - offset);

  net::Interval<int64_t> requested(offset, offset + len);

  // Find the first relevant child, if any --- may have to skip over
  // one entry as it may be before the range (consider, for example,
  // if the request is for [2048, 10000), while [0, 1024) is a valid range
  // for the entry).
  EntryMap::const_iterator i = children_->lower_bound(ToChildIndex(offset));
  if (i != children_->cend() && !ChildInterval(i).Intersects(requested))
    ++i;

  net::Interval<int64_t> found;
  if (i != children_->cend() &&
      requested.Intersects(ChildInterval(i), &found)) {
    // Found something relevant; now just need to expand this out if next
    // children are contiguous and relevant to the request.
    while (true) {
      ++i;
      net::Interval<int64_t> relevant_in_next_child;
      if (i == children_->cend() ||
          !requested.Intersects(ChildInterval(i), &relevant_in_next_child) ||
          relevant_in_next_child.min() != found.max()) {
        break;
      }

      found.SpanningUnion(relevant_in_next_child);
    }
    return RangeResult(found.min(), found.Length());
  }
  return RangeResult(offset, 0);
}
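
// Range lookup example (illustrative): if child 0 holds [0, 1024) and child 1
// holds all of [4096, 8192), a request for [2048, 10000) skips child 0 (no
// intersection), intersects child 1, finds no contiguous data beyond it, and
// returns the range starting at 4096 with length 4096.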

bool MemEntryImpl::InitSparseInfo() {
  DCHECK_EQ(EntryType::kParent, type());

  if (!children_) {
    // If we already have some data in the sparse stream but are being
    // initialized as a sparse entry, we should fail.
    if (GetDataSize(kSparseData))
      return false;
    children_ = std::make_unique<EntryMap>();

    // The parent entry stores data for the first block, so save this object to
    // index 0.
    (*children_)[0] = this;
  }
  return true;
}

MemEntryImpl* MemEntryImpl::GetChild(int64_t offset, bool create) {
  DCHECK_EQ(EntryType::kParent, type());
  int64_t index = ToChildIndex(offset);
  auto i = children_->find(index);
  if (i != children_->end())
    return i->second;
  if (create)
    return new MemEntryImpl(backend_, index, this, net_log_.net_log());
  return nullptr;
}

net::Interval<int64_t> MemEntryImpl::ChildInterval(
    MemEntryImpl::EntryMap::const_iterator i) {
  DCHECK(i != children_->cend());
  const MemEntryImpl* child = i->second;
  // The valid range in the child is [child_first_pos_, DataSize): child entry
  // operations go through the standard disk_cache::Entry API, so DataSize is
  // not aware of any hole at the beginning.
  int64_t child_responsibility_start = (i->first) * kMaxChildEntrySize;
  return net::Interval<int64_t>(
      child_responsibility_start + child->child_first_pos_,
      child_responsibility_start + child->GetDataSize(kSparseData));
}
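
// ChildInterval example (illustrative): for the child at index 2 with
// child_first_pos_ == 100 and a sparse data size of 3000, the interval is
// [2 * 4096 + 100, 2 * 4096 + 3000) == [8292, 11192).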

void MemEntryImpl::Compact() {
  // Stream 0 should already be fine since it's written out in a single
  // WriteData().
  data_[1].shrink_to_fit();
  data_[2].shrink_to_fit();
}

}  // namespace disk_cache