simple_entry_impl.cc

// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_entry_impl.h"

#include <algorithm>
#include <cstring>
#include <limits>
#include <utility>
#include <vector>

#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/check_op.h"
#include "base/cxx17_backports.h"
#include "base/location.h"
#include "base/memory/raw_ptr.h"
#include "base/notreached.h"
#include "base/task/task_runner.h"
#include "base/task/task_runner_util.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/memory_usage_estimator.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/prioritized_task_runner.h"
#include "net/disk_cache/backend_cleanup_tracker.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_histogram_enums.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_net_log_parameters.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/log/net_log.h"
#include "net/log/net_log_source_type.h"
#include "third_party/zlib/zlib.h"

namespace disk_cache {
namespace {

// An entry can store sparse data taking up to 1 / kMaxSparseDataSizeDivisor of
// the cache.
const int64_t kMaxSparseDataSizeDivisor = 10;

OpenEntryIndexEnum ComputeIndexState(SimpleBackendImpl* backend,
                                     uint64_t entry_hash) {
  if (!backend->index()->initialized())
    return INDEX_NOEXIST;
  if (backend->index()->Has(entry_hash))
    return INDEX_HIT;
  return INDEX_MISS;
}

void RecordOpenEntryIndexState(net::CacheType cache_type,
                               OpenEntryIndexEnum state) {
  SIMPLE_CACHE_UMA(ENUMERATION, "OpenEntryIndexState", cache_type, state,
                   INDEX_MAX);
}

void RecordHeaderSize(net::CacheType cache_type, int size) {
  SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, size);
}

void InvokeCallbackIfBackendIsAlive(
    const base::WeakPtr<SimpleBackendImpl>& backend,
    net::CompletionOnceCallback completion_callback,
    int result) {
  DCHECK(!completion_callback.is_null());
  if (!backend.get())
    return;
  std::move(completion_callback).Run(result);
}

void InvokeEntryResultCallbackIfBackendIsAlive(
    const base::WeakPtr<SimpleBackendImpl>& backend,
    EntryResultCallback completion_callback,
    EntryResult result) {
  DCHECK(!completion_callback.is_null());
  if (!backend.get())
    return;
  std::move(completion_callback).Run(std::move(result));
}

// If |sync_possible| is false and a callback is available, posts |rv| to it
// and returns net::ERR_IO_PENDING; otherwise just passes through |rv|.
int PostToCallbackIfNeeded(bool sync_possible,
                           net::CompletionOnceCallback callback,
                           int rv) {
  if (!sync_possible && !callback.is_null()) {
    base::SequencedTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::BindOnce(std::move(callback), rv));
    return net::ERR_IO_PENDING;
  } else {
    return rv;
  }
}

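// Illustrative sketch (not part of the original file): PostToCallbackIfNeeded
// lets an operation serve a result synchronously when the caller invoked it
// directly (|sync_possible| == true), while still honoring the asynchronous
// completion contract when the operation was queued. A hypothetical use:
//
//   int rv = PostToCallbackIfNeeded(/*sync_possible=*/false,
//                                   std::move(callback), /*rv=*/12);
//   // rv == net::ERR_IO_PENDING here; |callback| later runs with 12.
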
}  // namespace

using base::OnceClosure;
using base::FilePath;
using base::Time;
using base::TaskRunner;

// A helper class to ensure that RunNextOperationIfNeeded() is called when
// exiting the current stack frame.
class SimpleEntryImpl::ScopedOperationRunner {
 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {}

  ~ScopedOperationRunner() { entry_->RunNextOperationIfNeeded(); }

 private:
  const raw_ptr<SimpleEntryImpl> entry_;
};

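// Illustrative note (assumption, not part of the original file): the runner is
// used as a stack guard so that every early return in the *Internal() methods
// still pumps the operation queue:
//
//   void SimpleEntryImpl::SomeOperationInternal() {
//     ScopedOperationRunner operation_runner(this);
//     if (failed)
//       return;  // RunNextOperationIfNeeded() still runs on scope exit.
//   }
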
SimpleEntryImpl::ActiveEntryProxy::~ActiveEntryProxy() = default;

SimpleEntryImpl::SimpleEntryImpl(
    net::CacheType cache_type,
    const FilePath& path,
    scoped_refptr<BackendCleanupTracker> cleanup_tracker,
    const uint64_t entry_hash,
    OperationsMode operations_mode,
    SimpleBackendImpl* backend,
    SimpleFileTracker* file_tracker,
    scoped_refptr<BackendFileOperationsFactory> file_operations_factory,
    net::NetLog* net_log,
    uint32_t entry_priority)
    : cleanup_tracker_(std::move(cleanup_tracker)),
      backend_(backend->AsWeakPtr()),
      file_tracker_(file_tracker),
      file_operations_factory_(std::move(file_operations_factory)),
      cache_type_(cache_type),
      path_(path),
      entry_hash_(entry_hash),
      use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
      last_used_(Time::Now()),
      last_modified_(last_used_),
      prioritized_task_runner_(backend_->prioritized_task_runner()),
      net_log_(
          net::NetLogWithSource::Make(net_log,
                                      net::NetLogSourceType::DISK_CACHE_ENTRY)),
      stream_0_data_(base::MakeRefCounted<net::GrowableIOBuffer>()),
      entry_priority_(entry_priority) {
  static_assert(std::extent<decltype(data_size_)>() ==
                    std::extent<decltype(crc32s_end_offset_)>(),
                "arrays should be the same size");
  static_assert(
      std::extent<decltype(data_size_)>() == std::extent<decltype(crc32s_)>(),
      "arrays should be the same size");
  static_assert(std::extent<decltype(data_size_)>() ==
                    std::extent<decltype(have_written_)>(),
                "arrays should be the same size");
  ResetEntry();
  NetLogSimpleEntryConstruction(net_log_,
                                net::NetLogEventType::SIMPLE_CACHE_ENTRY,
                                net::NetLogEventPhase::BEGIN, this);
}

void SimpleEntryImpl::SetActiveEntryProxy(
    std::unique_ptr<ActiveEntryProxy> active_entry_proxy) {
  DCHECK(!active_entry_proxy_);
  active_entry_proxy_ = std::move(active_entry_proxy);
}

EntryResult SimpleEntryImpl::OpenEntry(EntryResultCallback callback) {
  DCHECK(backend_.get());

  net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_CALL);

  OpenEntryIndexEnum index_state =
      ComputeIndexState(backend_.get(), entry_hash_);
  RecordOpenEntryIndexState(cache_type_, index_state);

  // If entry is not known to the index, initiate fast failover to the network.
  if (index_state == INDEX_MISS) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END, net::ERR_FAILED);
    return EntryResult::MakeError(net::ERR_FAILED);
  }

  pending_operations_.push(SimpleEntryOperation::OpenOperation(
      this, SimpleEntryOperation::ENTRY_NEEDS_CALLBACK, std::move(callback)));
  RunNextOperationIfNeeded();
  return EntryResult::MakeError(net::ERR_IO_PENDING);
}

EntryResult SimpleEntryImpl::CreateEntry(EntryResultCallback callback) {
  DCHECK(backend_.get());
  DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));

  net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_CALL);

  EntryResult result = EntryResult::MakeError(net::ERR_IO_PENDING);
  if (use_optimistic_operations_ && state_ == STATE_UNINITIALIZED &&
      pending_operations_.size() == 0) {
    net_log_.AddEvent(
        net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);
    ReturnEntryToCaller();
    result = EntryResult::MakeCreated(this);
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, SimpleEntryOperation::ENTRY_ALREADY_RETURNED,
        EntryResultCallback()));

    // If we are optimistically returning before a preceding doom, we need to
    // wait for that IO, about which we will be notified externally.
    if (optimistic_create_pending_doom_state_ != CREATE_NORMAL) {
      DCHECK_EQ(CREATE_OPTIMISTIC_PENDING_DOOM,
                optimistic_create_pending_doom_state_);
      state_ = STATE_IO_PENDING;
    }
  } else {
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, SimpleEntryOperation::ENTRY_NEEDS_CALLBACK,
        std::move(callback)));
  }

  // We insert the entry in the index before creating the entry files in the
  // SimpleSynchronousEntry, because this way the worst scenario is when we
  // have the entry in the index but we don't have the created files yet; this
  // way we never leak files. CreationOperationComplete will remove the entry
  // from the index if the creation fails.
  backend_->index()->Insert(entry_hash_);

  RunNextOperationIfNeeded();
  return result;
}

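// Illustrative sketch (assumption, not part of the original file): with an
// optimistic backend, CreateEntry() can hand the entry back synchronously
// while the actual file creation happens later on the worker sequence:
//
//   EntryResult result = entry->CreateEntry(base::BindOnce(&OnCreated));
//   if (result.net_error() == net::OK) {
//     // Optimistic path: the entry is usable immediately, and the queued
//     // create operation carries no callback, so OnCreated never runs.
//   }
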
EntryResult SimpleEntryImpl::OpenOrCreateEntry(EntryResultCallback callback) {
  DCHECK(backend_.get());
  DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));

  net_log_.AddEvent(
      net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_CALL);

  OpenEntryIndexEnum index_state =
      ComputeIndexState(backend_.get(), entry_hash_);
  RecordOpenEntryIndexState(cache_type_, index_state);

  EntryResult result = EntryResult::MakeError(net::ERR_IO_PENDING);
  if (index_state == INDEX_MISS && use_optimistic_operations_ &&
      state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
    net_log_.AddEvent(
        net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);
    ReturnEntryToCaller();
    result = EntryResult::MakeCreated(this);
    pending_operations_.push(SimpleEntryOperation::OpenOrCreateOperation(
        this, index_state, SimpleEntryOperation::ENTRY_ALREADY_RETURNED,
        EntryResultCallback()));

    // The post-doom stuff should go through CreateEntry, not here.
    DCHECK_EQ(CREATE_NORMAL, optimistic_create_pending_doom_state_);
  } else {
    pending_operations_.push(SimpleEntryOperation::OpenOrCreateOperation(
        this, index_state, SimpleEntryOperation::ENTRY_NEEDS_CALLBACK,
        std::move(callback)));
  }

  // We insert the entry in the index before creating the entry files in the
  // SimpleSynchronousEntry, because this way the worst scenario is when we
  // have the entry in the index but we don't have the created files yet; this
  // way we never leak files. CreationOperationComplete will remove the entry
  // from the index if the creation fails.
  backend_->index()->Insert(entry_hash_);

  RunNextOperationIfNeeded();
  return result;
}

net::Error SimpleEntryImpl::DoomEntry(net::CompletionOnceCallback callback) {
  if (doom_state_ != DOOM_NONE)
    return net::OK;
  net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_DOOM_CALL);
  net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_DOOM_BEGIN);

  MarkAsDoomed(DOOM_QUEUED);
  if (backend_.get()) {
    if (optimistic_create_pending_doom_state_ == CREATE_NORMAL) {
      post_doom_waiting_ = backend_->OnDoomStart(entry_hash_);
    } else {
      DCHECK_EQ(STATE_IO_PENDING, state_);
      DCHECK_EQ(CREATE_OPTIMISTIC_PENDING_DOOM,
                optimistic_create_pending_doom_state_);
      // If we are in this state, we went ahead with making the entry even
      // though the backend was already keeping track of a doom, so it can't
      // keep track of ours. So we delay notifying it until
      // NotifyDoomBeforeCreateComplete is called. Since this path is invoked
      // only when the queue of post-doom callbacks was previously empty, while
      // the CompletionOnceCallback for the op is posted,
      // NotifyDoomBeforeCreateComplete() will be the first thing running after
      // the previous doom completes, so at that point we can immediately grab
      // a spot in entries_pending_doom_.
      optimistic_create_pending_doom_state_ =
          CREATE_OPTIMISTIC_PENDING_DOOM_FOLLOWED_BY_DOOM;
    }
  }
  pending_operations_.push(
      SimpleEntryOperation::DoomOperation(this, std::move(callback)));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

void SimpleEntryImpl::SetCreatePendingDoom() {
  DCHECK_EQ(CREATE_NORMAL, optimistic_create_pending_doom_state_);
  optimistic_create_pending_doom_state_ = CREATE_OPTIMISTIC_PENDING_DOOM;
}

void SimpleEntryImpl::NotifyDoomBeforeCreateComplete() {
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK_NE(CREATE_NORMAL, optimistic_create_pending_doom_state_);
  if (backend_.get() && optimistic_create_pending_doom_state_ ==
                            CREATE_OPTIMISTIC_PENDING_DOOM_FOLLOWED_BY_DOOM)
    post_doom_waiting_ = backend_->OnDoomStart(entry_hash_);

  state_ = STATE_UNINITIALIZED;
  optimistic_create_pending_doom_state_ = CREATE_NORMAL;
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::SetKey(const std::string& key) {
  key_ = key;
  net_log_.AddEventWithStringParams(
      net::NetLogEventType::SIMPLE_CACHE_ENTRY_SET_KEY, "key", key);
}

void SimpleEntryImpl::Doom() {
  DoomEntry(CompletionOnceCallback());
}

void SimpleEntryImpl::Close() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  CHECK_LT(0, open_count_);

  net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CLOSE_CALL);

  if (--open_count_ > 0) {
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}

std::string SimpleEntryImpl::GetKey() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  return key_;
}

Time SimpleEntryImpl::GetLastUsed() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(cache_type_ != net::APP_CACHE);
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  return last_modified_;
}

int32_t SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              CompletionOnceCallback callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_CALL,
        net::NetLogEventPhase::NONE, stream_index, offset, buf_len, false);
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
      buf_len < 0) {
    if (net_log_.IsCapturing()) {
      NetLogReadWriteComplete(
          net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_END,
          net::NetLogEventPhase::NONE, net::ERR_INVALID_ARGUMENT);
    }
    return net::ERR_INVALID_ARGUMENT;
  }

  // If this is the only operation, bypass the queue, and also see if there is
  // in-memory data to handle it synchronously. In principle, multiple reads
  // can be parallelized, but past studies have shown that parallelizable ones
  // happen <1% of the time, so it's probably not worth the effort.
  bool alone_in_queue =
      pending_operations_.size() == 0 && state_ == STATE_READY;
  if (alone_in_queue) {
    return ReadDataInternal(/*sync_possible=*/true, stream_index, offset, buf,
                            buf_len, std::move(callback));
  }

  pending_operations_.push(SimpleEntryOperation::ReadOperation(
      this, stream_index, offset, buf_len, buf, std::move(callback)));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               CompletionOnceCallback callback,
                               bool truncate) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_CALL,
        net::NetLogEventPhase::NONE, stream_index, offset, buf_len, truncate);
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
      offset < 0 || buf_len < 0) {
    if (net_log_.IsCapturing()) {
      NetLogReadWriteComplete(
          net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_END,
          net::NetLogEventPhase::NONE, net::ERR_INVALID_ARGUMENT);
    }
    return net::ERR_INVALID_ARGUMENT;
  }

  int end_offset;
  if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      (backend_.get() && end_offset > backend_->MaxFileSize())) {
    if (net_log_.IsCapturing()) {
      NetLogReadWriteComplete(
          net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_END,
          net::NetLogEventPhase::NONE, net::ERR_FAILED);
    }
    return net::ERR_FAILED;
  }

  ScopedOperationRunner operation_runner(this);

  // Stream 0 data is kept in memory, so it can be written immediately if
  // there are no IO operations pending.
  if (stream_index == 0 && state_ == STATE_READY &&
      pending_operations_.size() == 0)
    return SetStream0Data(buf, offset, buf_len, truncate);

  // We can only do an optimistic Write if there are no pending operations, so
  // that we are sure that the next call to RunNextOperationIfNeeded will
  // actually run the write operation that sets the stream size. It also
  // protects us from previous possibly-conflicting writes that could be
  // stacked in |pending_operations_|. We could optimize this for when we have
  // only read operations enqueued, but past studies have shown that such
  // parallelizable cases are very rare.
  const bool optimistic =
      (use_optimistic_operations_ && state_ == STATE_READY &&
       pending_operations_.size() == 0);
  CompletionOnceCallback op_callback;
  scoped_refptr<net::IOBuffer> op_buf;
  int ret_value = net::ERR_FAILED;
  if (!optimistic) {
    op_buf = buf;
    op_callback = std::move(callback);
    ret_value = net::ERR_IO_PENDING;
  } else {
    // TODO(morlovich,pasko): For performance, don't use a copy of an IOBuffer
    // here to avoid paying the price of the RefCountedThreadSafe atomic
    // operations.
    if (buf) {
      op_buf = base::MakeRefCounted<IOBuffer>(buf_len);
      memcpy(op_buf->data(), buf->data(), buf_len);
    }
    op_callback = CompletionOnceCallback();
    ret_value = buf_len;
    if (net_log_.IsCapturing()) {
      NetLogReadWriteComplete(
          net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
          net::NetLogEventPhase::NONE, buf_len);
    }
  }

  pending_operations_.push(SimpleEntryOperation::WriteOperation(
      this, stream_index, offset, buf_len, op_buf.get(), truncate, optimistic,
      std::move(op_callback)));
  return ret_value;
}

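// Illustrative note (assumption, not part of the original file): on the
// optimistic path WriteData() reports success (|buf_len|) before any disk IO
// has happened, which is why it must copy the caller's buffer; the caller is
// then free to reuse |buf| as soon as the call returns:
//
//   int rv = entry->WriteData(1, 0, buf.get(), len, std::move(cb),
//                             /*truncate=*/false);
//   // With an optimistic backend and an idle queue, rv == len and |cb| is
//   // never run; the queued operation holds a private copy of the data.
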
int SimpleEntryImpl::ReadSparseData(int64_t offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    CompletionOnceCallback callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_CALL,
        net::NetLogEventPhase::NONE, offset, buf_len);
  }

  if (offset < 0 || buf_len < 0) {
    if (net_log_.IsCapturing()) {
      NetLogReadWriteComplete(
          net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_END,
          net::NetLogEventPhase::NONE, net::ERR_INVALID_ARGUMENT);
    }
    return net::ERR_INVALID_ARGUMENT;
  }

  // Truncate |buf_len| to make sure that |offset + buf_len| does not overflow.
  // This is OK since one can't write that far anyway.
  // The result of std::min is guaranteed to fit into int since |buf_len| did.
  buf_len = std::min(static_cast<int64_t>(buf_len),
                     std::numeric_limits<int64_t>::max() - offset);

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::ReadSparseOperation(
      this, offset, buf_len, buf, std::move(callback)));
  return net::ERR_IO_PENDING;
}

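// Worked example (illustrative, not part of the original file) of the
// overflow guard above: with offset == INT64_MAX - 10 and buf_len == 100, the
// clamp yields std::min<int64_t>(100, 10) == 10, so |offset + buf_len| stays
// representable; and since the clamped value never exceeds the original int
// |buf_len|, assigning it back to an int is safe.
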
int SimpleEntryImpl::WriteSparseData(int64_t offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     CompletionOnceCallback callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_CALL,
        net::NetLogEventPhase::NONE, offset, buf_len);
  }

  if (offset < 0 || buf_len < 0 ||
      !base::CheckAdd(offset, buf_len).IsValid()) {
    if (net_log_.IsCapturing()) {
      NetLogReadWriteComplete(
          net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_END,
          net::NetLogEventPhase::NONE, net::ERR_INVALID_ARGUMENT);
    }
    return net::ERR_INVALID_ARGUMENT;
  }

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::WriteSparseOperation(
      this, offset, buf_len, buf, std::move(callback)));
  return net::ERR_IO_PENDING;
}

RangeResult SimpleEntryImpl::GetAvailableRange(int64_t offset,
                                               int len,
                                               RangeResultCallback callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (offset < 0 || len < 0)
    return RangeResult(net::ERR_INVALID_ARGUMENT);

  // Truncate |len| to make sure that |offset + len| does not overflow.
  // This is OK since one can't write that far anyway.
  // The result of std::min is guaranteed to fit into int since |len| did.
  len = std::min(static_cast<int64_t>(len),
                 std::numeric_limits<int64_t>::max() - offset);

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
      this, offset, len, std::move(callback)));
  return RangeResult(net::ERR_IO_PENDING);
}

bool SimpleEntryImpl::CouldBeSparse() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // TODO(morlovich): Actually check.
  return true;
}

void SimpleEntryImpl::CancelSparseIO() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // The Simple Cache does not return distinct objects for the same non-doomed
  // entry, so there's no need to coordinate which object is performing sparse
  // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
}

net::Error SimpleEntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // The Simple Cache does not return distinct objects for the same non-doomed
  // entry, so there's no need to coordinate which object is performing sparse
  // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
  return net::OK;
}

void SimpleEntryImpl::SetLastUsedTimeForTest(base::Time time) {
  last_used_ = time;
  backend_->index()->SetLastUsedTimeForTest(entry_hash_, time);
}

void SimpleEntryImpl::SetPriority(uint32_t entry_priority) {
  entry_priority_ = entry_priority;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_EQ(0U, pending_operations_.size());

  // This used to DCHECK on `state_`, but it turns out that destruction on
  // thread shutdown, when closures holding `this` get deleted, can happen in
  // circumstances not possible during normal use, such as when I/O for a
  // Close operation is keeping the entry alive in STATE_IO_PENDING, or when
  // an entry that's STATE_READY has callbacks pending to hand it over to the
  // user right as the thread is shut down (this would also have a non-null
  // `synchronous_entry_`).
  net_log_.EndEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY);
}

void SimpleEntryImpl::PostClientCallback(net::CompletionOnceCallback callback,
                                         int result) {
  if (callback.is_null())
    return;
  // Note that the callback is posted rather than directly invoked to avoid
  // reentrancy issues.
  base::SequencedTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::BindOnce(&InvokeCallbackIfBackendIsAlive, backend_,
                                std::move(callback), result));
}

void SimpleEntryImpl::PostClientCallback(EntryResultCallback callback,
                                         EntryResult result) {
  if (callback.is_null())
    return;
  // Note that the callback is posted rather than directly invoked to avoid
  // reentrancy issues.
  base::SequencedTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::BindOnce(&InvokeEntryResultCallbackIfBackendIsAlive, backend_,
                     std::move(callback), std::move(result)));
}

void SimpleEntryImpl::ResetEntry() {
  // If we're doomed, we can't really do anything else with the entry, since
  // we no longer own the name and are disconnected from the active entry
  // table. We preserve doom_state_ across this reset for this same reason.
  state_ = doom_state_ == DOOM_COMPLETED ? STATE_FAILURE : STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
  std::memset(data_size_, 0, sizeof(data_size_));
}

void SimpleEntryImpl::ReturnEntryToCaller() {
  DCHECK(backend_);
  ++open_count_;
  AddRef();  // Balanced in Close()
}

void SimpleEntryImpl::ReturnEntryToCallerAsync(bool is_open,
                                               EntryResultCallback callback) {
  DCHECK(!callback.is_null());

  // |open_count_| must be incremented immediately, so that a Close on an alias
  // doesn't try to wrap things up.
  ++open_count_;

  // Note that the callback is posted rather than directly invoked to avoid
  // reentrancy issues.
  base::SequencedTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::BindOnce(&SimpleEntryImpl::FinishReturnEntryToCallerAsync, this,
                     is_open, std::move(callback)));
}

void SimpleEntryImpl::FinishReturnEntryToCallerAsync(
    bool is_open,
    EntryResultCallback callback) {
  AddRef();  // Balanced in Close()
  if (!backend_.get()) {
    // With the backend dead, Open/Create operations are responsible for
    // cleaning up the entry --- the ownership is never transferred to the
    // caller, and their callback isn't invoked.
    Close();
    return;
  }

  std::move(callback).Run(is_open ? EntryResult::MakeOpened(this)
                                  : EntryResult::MakeCreated(this));
}

void SimpleEntryImpl::MarkAsDoomed(DoomState new_state) {
  DCHECK_NE(DOOM_NONE, new_state);
  doom_state_ = new_state;
  if (!backend_.get())
    return;
  backend_->index()->Remove(entry_hash_);
  active_entry_proxy_.reset();
}

void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    SimpleEntryOperation operation = std::move(pending_operations_.front());
    pending_operations_.pop();
    switch (operation.type()) {
      case SimpleEntryOperation::TYPE_OPEN:
        OpenEntryInternal(operation.entry_result_state(),
                          operation.ReleaseEntryResultCallback());
        break;
      case SimpleEntryOperation::TYPE_CREATE:
        CreateEntryInternal(operation.entry_result_state(),
                            operation.ReleaseEntryResultCallback());
        break;
      case SimpleEntryOperation::TYPE_OPEN_OR_CREATE:
        OpenOrCreateEntryInternal(operation.index_state(),
                                  operation.entry_result_state(),
                                  operation.ReleaseEntryResultCallback());
        break;
      case SimpleEntryOperation::TYPE_CLOSE:
        CloseInternal();
        break;
      case SimpleEntryOperation::TYPE_READ:
        ReadDataInternal(/*sync_possible=*/false, operation.index(),
                         operation.offset(), operation.buf(),
                         operation.length(), operation.ReleaseCallback());
        break;
      case SimpleEntryOperation::TYPE_WRITE:
        WriteDataInternal(operation.index(), operation.offset(),
                          operation.buf(), operation.length(),
                          operation.ReleaseCallback(), operation.truncate());
        break;
      case SimpleEntryOperation::TYPE_READ_SPARSE:
        ReadSparseDataInternal(operation.sparse_offset(), operation.buf(),
                               operation.length(),
                               operation.ReleaseCallback());
        break;
      case SimpleEntryOperation::TYPE_WRITE_SPARSE:
        WriteSparseDataInternal(operation.sparse_offset(), operation.buf(),
                                operation.length(),
                                operation.ReleaseCallback());
        break;
      case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
        GetAvailableRangeInternal(operation.sparse_offset(),
                                  operation.length(),
                                  operation.ReleaseRangeResultCalback());
        break;
      case SimpleEntryOperation::TYPE_DOOM:
        DoomEntryInternal(operation.ReleaseCallback());
        break;
      default:
        NOTREACHED();
    }
    // |this| may have been deleted.
  }
}

void SimpleEntryImpl::OpenEntryInternal(
    SimpleEntryOperation::EntryResultState result_state,
    EntryResultCallback callback) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_BEGIN);

  // No optimistic sync return possible on open.
  DCHECK_EQ(SimpleEntryOperation::ENTRY_NEEDS_CALLBACK, result_state);

  if (state_ == STATE_READY) {
    ReturnEntryToCallerAsync(/*is_open=*/true, std::move(callback));
    NetLogSimpleEntryCreation(net_log_,
                              net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END,
                              net::NetLogEventPhase::NONE, this, net::OK);
    return;
  }
  if (state_ == STATE_FAILURE) {
    PostClientCallback(std::move(callback),
                       EntryResult::MakeError(net::ERR_FAILED));
    NetLogSimpleEntryCreation(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END,
        net::NetLogEventPhase::NONE, this, net::ERR_FAILED);
    return;
  }

  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  auto results = std::make_unique<SimpleEntryCreationResults>(SimpleEntryStat(
      last_used_, last_modified_, data_size_, sparse_data_size_));

  int32_t trailer_prefetch_size = -1;
  base::Time last_used_time;
  if (SimpleBackendImpl* backend = backend_.get()) {
    if (cache_type_ == net::APP_CACHE) {
      trailer_prefetch_size =
          backend->index()->GetTrailerPrefetchSize(entry_hash_);
    } else {
      last_used_time = backend->index()->GetLastUsedTime(entry_hash_);
    }
  }

  base::OnceClosure task = base::BindOnce(
      &SimpleSynchronousEntry::OpenEntry, cache_type_, path_, key_,
      entry_hash_, file_tracker_, file_operations_factory_->CreateUnbound(),
      trailer_prefetch_size, results.get());

  base::OnceClosure reply = base::BindOnce(
      &SimpleEntryImpl::CreationOperationComplete, this, result_state,
      std::move(callback), start_time, last_used_time, std::move(results),
      net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END);

  prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
                                             std::move(reply),
                                             entry_priority_);
}

void SimpleEntryImpl::CreateEntryInternal(
    SimpleEntryOperation::EntryResultState result_state,
    EntryResultCallback callback) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_BEGIN);

  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    NetLogSimpleEntryCreation(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_END,
        net::NetLogEventPhase::NONE, this, net::ERR_FAILED);
    // If we have optimistically returned an entry, we would be the first entry
    // in queue with state_ == STATE_UNINITIALIZED.
    DCHECK_EQ(SimpleEntryOperation::ENTRY_NEEDS_CALLBACK, result_state);
    PostClientCallback(std::move(callback),
                       EntryResult::MakeError(net::ERR_FAILED));
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  const base::TimeTicks start_time = base::TimeTicks::Now();
  auto results = std::make_unique<SimpleEntryCreationResults>(SimpleEntryStat(
      last_used_, last_modified_, data_size_, sparse_data_size_));

  OnceClosure task =
      base::BindOnce(&SimpleSynchronousEntry::CreateEntry, cache_type_, path_,
                     key_, entry_hash_, file_tracker_,
                     file_operations_factory_->CreateUnbound(), results.get());
  OnceClosure reply = base::BindOnce(
      &SimpleEntryImpl::CreationOperationComplete, this, result_state,
      std::move(callback), start_time, base::Time(), std::move(results),
      net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_END);
  prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
                                             std::move(reply),
                                             entry_priority_);
}

void SimpleEntryImpl::OpenOrCreateEntryInternal(
    OpenEntryIndexEnum index_state,
    SimpleEntryOperation::EntryResultState result_state,
    EntryResultCallback callback) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(
      net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_BEGIN);

  // result_state may be ENTRY_ALREADY_RETURNED only if an optimistic create is
  // being performed, which must be in STATE_UNINITIALIZED.
  bool optimistic_create =
      (result_state == SimpleEntryOperation::ENTRY_ALREADY_RETURNED);
  DCHECK(!optimistic_create || state_ == STATE_UNINITIALIZED);

  if (state_ == STATE_READY) {
    ReturnEntryToCallerAsync(/*is_open=*/true, std::move(callback));
    NetLogSimpleEntryCreation(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_END,
        net::NetLogEventPhase::NONE, this, net::OK);
    return;
  }
  if (state_ == STATE_FAILURE) {
    PostClientCallback(std::move(callback),
                       EntryResult::MakeError(net::ERR_FAILED));
    NetLogSimpleEntryCreation(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_END,
        net::NetLogEventPhase::NONE, this, net::ERR_FAILED);
    return;
  }

  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  auto results = std::make_unique<SimpleEntryCreationResults>(SimpleEntryStat(
      last_used_, last_modified_, data_size_, sparse_data_size_));

  int32_t trailer_prefetch_size = -1;
  base::Time last_used_time;
  if (SimpleBackendImpl* backend = backend_.get()) {
    if (cache_type_ == net::APP_CACHE) {
      trailer_prefetch_size =
          backend->index()->GetTrailerPrefetchSize(entry_hash_);
    } else {
      last_used_time = backend->index()->GetLastUsedTime(entry_hash_);
    }
  }

  base::OnceClosure task =
      base::BindOnce(&SimpleSynchronousEntry::OpenOrCreateEntry, cache_type_,
                     path_, key_, entry_hash_, index_state, optimistic_create,
                     file_tracker_, file_operations_factory_->CreateUnbound(),
                     trailer_prefetch_size, results.get());

  base::OnceClosure reply = base::BindOnce(
      &SimpleEntryImpl::CreationOperationComplete, this, result_state,
      std::move(callback), start_time, last_used_time, std::move(results),
      net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_END);

  prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
                                             std::move(reply),
                                             entry_priority_);
}

void SimpleEntryImpl::CloseInternal() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  if (open_count_ != 0) {
    // The entry got resurrected in between Close and CloseInternal; nothing
    // to do for now.
    return;
  }

  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  auto crc32s_to_write = std::make_unique<std::vector<CRCRecord>>();

  net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);

  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
    for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
      if (have_written_[i]) {
        if (GetDataSize(i) == crc32s_end_offset_[i]) {
          int32_t crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  }

  auto results = std::make_unique<SimpleEntryCloseResults>();
  if (synchronous_entry_) {
    OnceClosure task = base::BindOnce(
        &SimpleSynchronousEntry::Close, base::Unretained(synchronous_entry_),
        SimpleEntryStat(last_used_, last_modified_, data_size_,
                        sparse_data_size_),
        std::move(crc32s_to_write), base::RetainedRef(stream_0_data_),
        results.get());
    OnceClosure reply = base::BindOnce(
        &SimpleEntryImpl::CloseOperationComplete, this, std::move(results));
    synchronous_entry_ = nullptr;
    prioritized_task_runner_->PostTaskAndReply(
        FROM_HERE, std::move(task), std::move(reply), entry_priority_);
  } else {
    CloseOperationComplete(std::move(results));
  }
}

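// Illustrative note (assumption, not part of the original file): a CRCRecord
// is only emitted with a trusted checksum when the incremental CRC happens to
// cover the whole stream (crc32s_end_offset_[i] == GetDataSize(i)); otherwise
// the record is written with its has-checksum flag cleared, so readers know
// the stored value cannot be used to verify that stream.
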
int SimpleEntryImpl::ReadDataInternal(bool sync_possible,
                                      int stream_index,
                                      int offset,
                                      net::IOBuffer* buf,
                                      int buf_len,
                                      net::CompletionOnceCallback callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_BEGIN,
        net::NetLogEventPhase::NONE, stream_index, offset, buf_len, false);
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (net_log_.IsCapturing()) {
      NetLogReadWriteComplete(
          net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_END,
          net::NetLogEventPhase::NONE, net::ERR_FAILED);
    }
    // Note that the API states that client-provided callbacks for entry-level
    // (i.e. non-backend) operations (e.g. read, write) are invoked even if
    // the backend was already destroyed.
    return PostToCallbackIfNeeded(sync_possible, std::move(callback),
                                  net::ERR_FAILED);
  }
  DCHECK_EQ(STATE_READY, state_);
  if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
    // If there is nothing to read, we bail out before setting state_ to
    // STATE_IO_PENDING (so ScopedOperationRunner might start us on the next
    // op here).
    return PostToCallbackIfNeeded(sync_possible, std::move(callback), 0);
  }

  // Truncate the read so it does not go past the end of the stream.
  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);

  // Since stream 0 data is kept in memory, it is read immediately.
  if (stream_index == 0) {
    int rv = ReadFromBuffer(stream_0_data_.get(), offset, buf_len, buf);
    return PostToCallbackIfNeeded(sync_possible, std::move(callback), rv);
  }

  // Sometimes we can read in-ram prefetched stream 1 data immediately, too.
  if (stream_index == 1) {
    if (stream_1_prefetch_data_) {
      int rv =
          ReadFromBuffer(stream_1_prefetch_data_.get(), offset, buf_len, buf);
      return PostToCallbackIfNeeded(sync_possible, std::move(callback), rv);
    }
  }

  state_ = STATE_IO_PENDING;
  if (doom_state_ == DOOM_NONE && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

  SimpleSynchronousEntry::ReadRequest read_req(stream_index, offset, buf_len);
  // Figure out if we should be computing the checksum for this read,
  // and whether we should be verifying it, too.
  if (crc32s_end_offset_[stream_index] == offset) {
    read_req.request_update_crc = true;
    read_req.previous_crc32 =
        offset == 0 ? crc32(0, Z_NULL, 0) : crc32s_[stream_index];

    // We can't verify the checksum if we already overwrote part of the file.
    // (It may still make sense to compute it if the overwritten area and the
    // about-to-read-in area are adjacent.)
    read_req.request_verify_crc = !have_written_[stream_index];
  }

  auto result = std::make_unique<SimpleSynchronousEntry::ReadResult>();
  auto entry_stat = std::make_unique<SimpleEntryStat>(
      last_used_, last_modified_, data_size_, sparse_data_size_);
  OnceClosure task = base::BindOnce(
      &SimpleSynchronousEntry::ReadData, base::Unretained(synchronous_entry_),
      read_req, entry_stat.get(), base::RetainedRef(buf), result.get());
  OnceClosure reply = base::BindOnce(
      &SimpleEntryImpl::ReadOperationComplete, this, stream_index, offset,
      std::move(callback), std::move(entry_stat), std::move(result));
  prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
                                             std::move(reply),
                                             entry_priority_);
  return net::ERR_IO_PENDING;
}

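// Illustrative note (assumption, not part of the original file): the CRC
// bookkeeping above works because zlib's crc32() can be extended
// incrementally. If crc32s_[i] covers bytes [0, crc32s_end_offset_[i]), a
// sequential read starting exactly at that offset can continue the running
// value:
//
//   uLong running = crc32(0, Z_NULL, 0);               // empty-prefix seed
//   running = crc32(running, chunk1_data, chunk1_len);  // covers [0, n1)
//   running = crc32(running, chunk2_data, chunk2_len);  // covers [0, n1+n2)
//
// A read at any other offset cannot extend the running value, so the request
// skips the CRC work entirely.
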
void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        net::CompletionOnceCallback callback,
                                        bool truncate) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
        net::NetLogEventPhase::NONE, stream_index, offset, buf_len, truncate);
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (net_log_.IsCapturing()) {
      NetLogReadWriteComplete(
          net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_END,
          net::NetLogEventPhase::NONE, net::ERR_FAILED);
    }
    if (!callback.is_null()) {
      base::SequencedTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::BindOnce(std::move(callback), net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }

  DCHECK_EQ(STATE_READY, state_);

  // Since stream 0 data is kept in memory, it will be written immediately.
  if (stream_index == 0) {
    int ret_value = SetStream0Data(buf, offset, buf_len, truncate);
    if (!callback.is_null()) {
      base::SequencedTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::BindOnce(std::move(callback), ret_value));
    }
    return;
  }

  // Ignore zero-length writes that do not change the file size.
  if (buf_len == 0) {
    int32_t data_size = data_size_[stream_index];
    if (truncate ? (offset == data_size) : (offset <= data_size)) {
      if (!callback.is_null()) {
        base::SequencedTaskRunnerHandle::Get()->PostTask(
            FROM_HERE, base::BindOnce(std::move(callback), 0));
      }
      return;
    }
  }
  state_ = STATE_IO_PENDING;
  if (doom_state_ == DOOM_NONE && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

  // Any stream 1 write invalidates the prefetched data.
  if (stream_index == 1)
    stream_1_prefetch_data_ = nullptr;

  bool request_update_crc = false;
  uint32_t initial_crc = 0;

  if (offset < crc32s_end_offset_[stream_index]) {
    // If a range for which the crc32 was already computed is rewritten, the
    // computation of the crc32 needs to start from 0 again.
    crc32s_end_offset_[stream_index] = 0;
  }

  if (crc32s_end_offset_[stream_index] == offset) {
    request_update_crc = true;
    initial_crc = (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0);
  }

  // |entry_stat| needs to be initialized before modifying |data_size_|.
  auto entry_stat = std::make_unique<SimpleEntryStat>(
      last_used_, last_modified_, data_size_, sparse_data_size_);
  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    data_size_[stream_index] =
        std::max(offset + buf_len, GetDataSize(stream_index));
  }

  auto write_result = std::make_unique<SimpleSynchronousEntry::WriteResult>();

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  have_written_[stream_index] = true;
  // Writing on stream 1 affects the placement of stream 0 in the file, so the
  // EOF record will have to be rewritten.
  if (stream_index == 1)
    have_written_[0] = true;

  // Retain a reference to |buf| in |reply| instead of |task|, so that we can
  // reduce cross thread malloc/free pairs. The cross thread malloc/free pair
  // increases the apparent memory usage due to the thread cached free list.
  // TODO(morlovich): Remove the doom_state_ argument to WriteData, since with
  // renaming rather than delete, creating a new stream 2 of a doomed entry
  // will just work.
  OnceClosure task = base::BindOnce(
      &SimpleSynchronousEntry::WriteData, base::Unretained(synchronous_entry_),
      SimpleSynchronousEntry::WriteRequest(
          stream_index, offset, buf_len, initial_crc, truncate,
          doom_state_ != DOOM_NONE, request_update_crc),
      base::Unretained(buf), entry_stat.get(), write_result.get());
  OnceClosure reply =
      base::BindOnce(&SimpleEntryImpl::WriteOperationComplete, this,
                     stream_index, std::move(callback), std::move(entry_stat),
                     std::move(write_result), base::RetainedRef(buf));
  prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
                                             std::move(reply),
                                             entry_priority_);
}

void SimpleEntryImpl::ReadSparseDataInternal(
    int64_t sparse_offset,
    net::IOBuffer* buf,
    int buf_len,
    net::CompletionOnceCallback callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_BEGIN,
        net::NetLogEventPhase::NONE, sparse_offset, buf_len);
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (net_log_.IsCapturing()) {
      NetLogReadWriteComplete(
          net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_END,
          net::NetLogEventPhase::NONE, net::ERR_FAILED);
    }
    if (!callback.is_null()) {
      base::SequencedTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::BindOnce(std::move(callback), net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  auto result = std::make_unique<int>();
  auto last_used = std::make_unique<base::Time>();
  OnceClosure task = base::BindOnce(
      &SimpleSynchronousEntry::ReadSparseData,
      base::Unretained(synchronous_entry_),
      SimpleSynchronousEntry::SparseRequest(sparse_offset, buf_len),
      base::RetainedRef(buf), last_used.get(), result.get());
  OnceClosure reply = base::BindOnce(
      &SimpleEntryImpl::ReadSparseOperationComplete, this, std::move(callback),
      std::move(last_used), std::move(result));
  prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
                                             std::move(reply),
                                             entry_priority_);
}

void SimpleEntryImpl::WriteSparseDataInternal(
    int64_t sparse_offset,
    net::IOBuffer* buf,
    int buf_len,
    net::CompletionOnceCallback callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsCapturing()) {
    NetLogSparseOperation(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_BEGIN,
        net::NetLogEventPhase::NONE, sparse_offset, buf_len);
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (net_log_.IsCapturing()) {
      NetLogReadWriteComplete(
          net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_END,
          net::NetLogEventPhase::NONE, net::ERR_FAILED);
    }
    if (!callback.is_null()) {
      base::SequencedTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::BindOnce(std::move(callback), net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  uint64_t max_sparse_data_size = std::numeric_limits<int64_t>::max();
  if (backend_.get()) {
    uint64_t max_cache_size = backend_->index()->max_size();
    max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
  }

  auto entry_stat = std::make_unique<SimpleEntryStat>(
      last_used_, last_modified_, data_size_, sparse_data_size_);

  last_used_ = last_modified_ = base::Time::Now();

  auto result = std::make_unique<int>();
  OnceClosure task = base::BindOnce(
      &SimpleSynchronousEntry::WriteSparseData,
      base::Unretained(synchronous_entry_),
      SimpleSynchronousEntry::SparseRequest(sparse_offset, buf_len),
      base::RetainedRef(buf), max_sparse_data_size, entry_stat.get(),
      result.get());
  OnceClosure reply = base::BindOnce(
      &SimpleEntryImpl::WriteSparseOperationComplete, this, std::move(callback),
      std::move(entry_stat), std::move(result));
  prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
                                             std::move(reply), entry_priority_);
}
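
// Unlike the sparse read/write paths, GetAvailableRange reports through a
// RangeResult (a net error plus the start and length of the contiguous
// available range) rather than a bare completion code, and it does not emit
// BEGIN/END net-log events.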
void SimpleEntryImpl::GetAvailableRangeInternal(int64_t sparse_offset,
                                                int len,
                                                RangeResultCallback callback) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  ScopedOperationRunner operation_runner(this);

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (!callback.is_null()) {
      base::SequencedTaskRunnerHandle::Get()->PostTask(
          FROM_HERE,
          base::BindOnce(std::move(callback), RangeResult(net::ERR_FAILED)));
    }
    // |this| may be destroyed after return here.
    return;
  }

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  auto result = std::make_unique<RangeResult>();
  OnceClosure task = base::BindOnce(
      &SimpleSynchronousEntry::GetAvailableRange,
      base::Unretained(synchronous_entry_),
      SimpleSynchronousEntry::SparseRequest(sparse_offset, len), result.get());
  OnceClosure reply =
      base::BindOnce(&SimpleEntryImpl::GetAvailableRangeOperationComplete, this,
                     std::move(callback), std::move(result));
  prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
                                             std::move(reply), entry_priority_);
}
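
// Dooming is handled in three distinct ways below:
//   1. doom_state_ == DOOM_COMPLETED: some earlier operation already cleaned
//      the files up, so only the completion bookkeeping remains.
//   2. No backend: truncate the files in place (see the comment below for why
//      deleting them would be worse).
//   3. Backend present: doom through the live synchronous entry if one exists
//      (so it can rename its files), otherwise just delete the files.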
void SimpleEntryImpl::DoomEntryInternal(net::CompletionOnceCallback callback) {
  if (doom_state_ == DOOM_COMPLETED) {
    // During the time we were sitting on a queue, some operation failed
    // and cleaned our files up, so we don't have to do anything.
    DoomOperationComplete(std::move(callback), state_, net::OK);
    return;
  }

  if (!backend_) {
    // If there's no backend, we want to truncate the files rather than delete
    // or rename them. Either op will update the entry directory's mtime, which
    // will likely force a full index rebuild on the next startup; this is
    // clearly an undesirable cost. Instead, the lesser evil is to set the entry
    // files to length zero, leaving the invalid entry in the index. On the next
    // attempt to open the entry, it will fail asynchronously (since the magic
    // numbers will not be found), and the files will actually be removed.
    // Since there is no backend, new entries to conflict with us also can't be
    // created.
    prioritized_task_runner_->PostTaskAndReplyWithResult(
        FROM_HERE,
        base::BindOnce(&SimpleSynchronousEntry::TruncateEntryFiles, path_,
                       entry_hash_, file_operations_factory_->CreateUnbound()),
        base::BindOnce(&SimpleEntryImpl::DoomOperationComplete, this,
                       std::move(callback),
                       // Return to STATE_FAILURE after dooming, since no
                       // operation can succeed on the truncated entry files.
                       STATE_FAILURE),
        entry_priority_);
    state_ = STATE_IO_PENDING;
    return;
  }

  if (synchronous_entry_) {
    // If there is a backing object, we have to go through its instance
    // methods, so that it can rename itself and keep track of the alternative
    // name.
    prioritized_task_runner_->PostTaskAndReplyWithResult(
        FROM_HERE,
        base::BindOnce(&SimpleSynchronousEntry::Doom,
                       base::Unretained(synchronous_entry_)),
        base::BindOnce(&SimpleEntryImpl::DoomOperationComplete, this,
                       std::move(callback), state_),
        entry_priority_);
  } else {
    DCHECK_EQ(STATE_UNINITIALIZED, state_);
    // If nothing is open, we can just delete the files. We know they have the
    // base names, since if we ever renamed them our doom_state_ would be
    // DOOM_COMPLETED, and we would exit at function entry.
    prioritized_task_runner_->PostTaskAndReplyWithResult(
        FROM_HERE,
        base::BindOnce(&SimpleSynchronousEntry::DeleteEntryFiles, path_,
                       cache_type_, entry_hash_,
                       file_operations_factory_->CreateUnbound()),
        base::BindOnce(&SimpleEntryImpl::DoomOperationComplete, this,
                       std::move(callback), state_),
        entry_priority_);
  }
  state_ = STATE_IO_PENDING;
}
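
// Shared completion handler for OpenEntryInternal and CreateEntryInternal. On
// failure it resets the entry but intentionally stays registered as the
// active entry (see the comment inside); on success it adopts the synchronous
// entry, copies any prefetched stream 0/1 data and checksums, and, if
// requested, returns the entry to the caller.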
void SimpleEntryImpl::CreationOperationComplete(
    SimpleEntryOperation::EntryResultState result_state,
    EntryResultCallback completion_callback,
    const base::TimeTicks& start_time,
    const base::Time index_last_used_time,
    std::unique_ptr<SimpleEntryCreationResults> in_results,
    net::NetLogEventType end_event_type) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_results);

  ScopedOperationRunner operation_runner(this);

  if (in_results->result != net::OK) {
    if (in_results->result != net::ERR_FILE_EXISTS) {
      // Here we keep the index up-to-date, but don't remove ourselves from
      // active entries since we may have queued operations, and it would be
      // problematic to run further Creates, Opens, or Dooms if we are not
      // the active entry. We can only do this because OpenEntryInternal
      // and CreateEntryInternal have to start from STATE_UNINITIALIZED, so
      // nothing else is going on which may be confused.
      if (backend_)
        backend_->index()->Remove(entry_hash_);
    }

    net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);
    PostClientCallback(std::move(completion_callback),
                       EntryResult::MakeError(net::ERR_FAILED));
    ResetEntry();
    return;
  }

  // If this is a successful creation (rather than open), mark all streams to
  // be saved on close.
  if (in_results->created) {
    for (bool& have_written : have_written_)
      have_written = true;
  }

  // Make sure to keep the index up-to-date. We likely already did this when
  // CreateEntry was called, but it's possible we were sitting on a queue
  // after an op that removed us.
  if (backend_ && doom_state_ == DOOM_NONE)
    backend_->index()->Insert(entry_hash_);

  state_ = STATE_READY;
  synchronous_entry_ = in_results->sync_entry;

  // Copy over any pre-fetched data and its CRCs.
  for (int stream = 0; stream < 2; ++stream) {
    const SimpleStreamPrefetchData& prefetched =
        in_results->stream_prefetch_data[stream];
    if (prefetched.data.get()) {
      if (stream == 0)
        stream_0_data_ = prefetched.data;
      else
        stream_1_prefetch_data_ = prefetched.data;

      // The crc was read in SimpleSynchronousEntry.
      crc32s_[stream] = prefetched.stream_crc32;
      crc32s_end_offset_[stream] = in_results->entry_stat.data_size(stream);
    }
  }

  // If this entry was opened by hash, key_ could still be empty. If so, update
  // it with the key read from the synchronous entry.
  if (key_.empty()) {
    SetKey(synchronous_entry_->key());
  } else {
    // This should only be triggered when creating an entry. In the open case
    // the key is either copied from the arguments to open, or checked
    // in the synchronous entry.
    DCHECK_EQ(key_, synchronous_entry_->key());
  }

  // Prefer the index's last used time to the disk's, since the latter may be
  // pretty inaccurate.
  if (!index_last_used_time.is_null())
    in_results->entry_stat.set_last_used(index_last_used_time);

  UpdateDataFromEntryStat(in_results->entry_stat);
  if (cache_type_ == net::APP_CACHE && backend_.get() && backend_->index()) {
    backend_->index()->SetTrailerPrefetchSize(
        entry_hash_, in_results->computed_trailer_prefetch_size);
  }
  SIMPLE_CACHE_UMA(TIMES,
                   "EntryCreationTime", cache_type_,
                   (base::TimeTicks::Now() - start_time));

  net_log_.AddEvent(end_event_type);

  const bool created = in_results->created;

  // We need to release `in_results` before going out of scope, because
  // `operation_runner` destruction might run a close operation, which would
  // ultimately release `in_results->sync_entry` and leave a dangling pointer
  // here.
  in_results = nullptr;
  if (result_state == SimpleEntryOperation::ENTRY_NEEDS_CALLBACK) {
    ReturnEntryToCallerAsync(!created, std::move(completion_callback));
  }
}
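
// Any failed I/O result is treated as fatal for this entry: the state machine
// drops to STATE_FAILURE and the entry is marked doomed, so the remaining
// queued operations fail fast instead of touching possibly-inconsistent
// files.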
void SimpleEntryImpl::UpdateStateAfterOperationComplete(
    const SimpleEntryStat& entry_stat,
    int result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  if (result < 0) {
    state_ = STATE_FAILURE;
    MarkAsDoomed(DOOM_COMPLETED);
  } else {
    state_ = STATE_READY;
    UpdateDataFromEntryStat(entry_stat);
  }
}

void SimpleEntryImpl::EntryOperationComplete(
    net::CompletionOnceCallback completion_callback,
    const SimpleEntryStat& entry_stat,
    int result) {
  UpdateStateAfterOperationComplete(entry_stat, result);
  if (!completion_callback.is_null()) {
    base::SequencedTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::BindOnce(std::move(completion_callback), result));
  }
  RunNextOperationIfNeeded();
}
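
// |crc32s_end_offset_[stream]| records how long a prefix of the stream is
// covered by the incremental checksum in |crc32s_[stream]|. A successful read
// that continued at that boundary extends the covered prefix; any failed read
// invalidates it. Sketch (hypothetical offsets, not real data):
//   read 100 bytes at offset 0  -> end_offset 100
//   read 50 bytes at offset 100 -> end_offset 150
//   a read fails                -> end_offset 0; coverage restarts later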
void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    net::CompletionOnceCallback completion_callback,
    std::unique_ptr<SimpleEntryStat> entry_stat,
    std::unique_ptr<SimpleSynchronousEntry::ReadResult> read_result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_result);
  int result = read_result->result;

  if (read_result->crc_updated) {
    if (result > 0) {
      DCHECK_EQ(crc32s_end_offset_[stream_index], offset);
      crc32s_end_offset_[stream_index] += result;
      crc32s_[stream_index] = read_result->updated_crc32;
    }
  }

  if (result < 0) {
    crc32s_end_offset_[stream_index] = 0;
  }

  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_,
                            net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_END,
                            net::NetLogEventPhase::NONE, result);
  }

  EntryOperationComplete(std::move(completion_callback), *entry_stat, result);
}
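
// |buf| is deliberately unused here: binding it into the reply keeps the
// caller's buffer alive until the disk write has completed, assuming the bind
// site wraps it with base::RetainedRef, as the task-posting code elsewhere in
// this file does.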
void SimpleEntryImpl::WriteOperationComplete(
    int stream_index,
    net::CompletionOnceCallback completion_callback,
    std::unique_ptr<SimpleEntryStat> entry_stat,
    std::unique_ptr<SimpleSynchronousEntry::WriteResult> write_result,
    net::IOBuffer* buf) {
  int result = write_result->result;
  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_,
                            net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_END,
                            net::NetLogEventPhase::NONE, result);
  }

  if (result < 0)
    crc32s_end_offset_[stream_index] = 0;

  if (result > 0 && write_result->crc_updated) {
    crc32s_end_offset_[stream_index] += result;
    crc32s_[stream_index] = write_result->updated_crc32;
  }

  EntryOperationComplete(std::move(completion_callback), *entry_stat, result);
}

void SimpleEntryImpl::ReadSparseOperationComplete(
    net::CompletionOnceCallback completion_callback,
    std::unique_ptr<base::Time> last_used,
    std::unique_ptr<int> result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(synchronous_entry_);
  DCHECK(result);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_END,
        net::NetLogEventPhase::NONE, *result);
  }

  SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(std::move(completion_callback), entry_stat, *result);
}

void SimpleEntryImpl::WriteSparseOperationComplete(
    net::CompletionOnceCallback completion_callback,
    std::unique_ptr<SimpleEntryStat> entry_stat,
    std::unique_ptr<int> result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(synchronous_entry_);
  DCHECK(result);

  if (net_log_.IsCapturing()) {
    NetLogReadWriteComplete(
        net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_END,
        net::NetLogEventPhase::NONE, *result);
  }

  EntryOperationComplete(std::move(completion_callback), *entry_stat, *result);
}

void SimpleEntryImpl::GetAvailableRangeOperationComplete(
    RangeResultCallback completion_callback,
    std::unique_ptr<RangeResult> result) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(synchronous_entry_);
  DCHECK(result);

  SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                             sparse_data_size_);
  UpdateStateAfterOperationComplete(entry_stat, result->net_error);
  if (!completion_callback.is_null()) {
    base::SequencedTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::BindOnce(std::move(completion_callback), *result));
  }
  RunNextOperationIfNeeded();
}
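
// Runs after the entry's files have been truncated, renamed, or deleted.
// Restores whichever state the doom path chose (e.g. STATE_FAILURE after
// truncation), and notifies any operations that were queued waiting for this
// doom to finish.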
void SimpleEntryImpl::DoomOperationComplete(
    net::CompletionOnceCallback callback,
    State state_to_restore,
    int result) {
  state_ = state_to_restore;
  doom_state_ = DOOM_COMPLETED;
  net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_DOOM_END);
  PostClientCallback(std::move(callback), result);
  RunNextOperationIfNeeded();
  if (post_doom_waiting_) {
    post_doom_waiting_->OnDoomComplete(entry_hash_);
    post_doom_waiting_ = nullptr;
  }
}
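
// For APP_CACHE, the trailer prefetch size estimated while closing is stored
// in the index so that a future open of this entry can size its trailer read
// appropriately.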
void SimpleEntryImpl::CloseOperationComplete(
    std::unique_ptr<SimpleEntryCloseResults> in_results) {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CLOSE_END);

  if (cache_type_ == net::APP_CACHE &&
      in_results->estimated_trailer_prefetch_size > 0 && backend_.get() &&
      backend_->index()) {
    backend_->index()->SetTrailerPrefetchSize(
        entry_hash_, in_results->estimated_trailer_prefetch_size);
  }

  ResetEntry();
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::UpdateDataFromEntryStat(
    const SimpleEntryStat& entry_stat) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);

  last_used_ = entry_stat.last_used();
  last_modified_ = entry_stat.last_modified();
  for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
    data_size_[i] = entry_stat.data_size(i);
  }
  sparse_data_size_ = entry_stat.sparse_data_size();

  SimpleBackendImpl* backend_ptr = backend_.get();
  if (doom_state_ == DOOM_NONE && backend_ptr) {
    backend_ptr->index()->UpdateEntrySize(
        entry_hash_, base::checked_cast<uint32_t>(GetDiskUsage()));
  }
}
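
// Disk usage is the sum of each stream's file size, which
// simple_util::GetFileSizeFromDataSize derives from the data size plus the
// key-dependent per-file overhead, plus the sparse data payload.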
int64_t SimpleEntryImpl::GetDiskUsage() const {
  int64_t file_size = 0;
  for (int data_size : data_size_) {
    file_size += simple_util::GetFileSizeFromDataSize(key_.size(), data_size);
  }
  file_size += sparse_data_size_;
  return file_size;
}

int SimpleEntryImpl::ReadFromBuffer(net::GrowableIOBuffer* in_buf,
                                    int offset,
                                    int buf_len,
                                    net::IOBuffer* out_buf) {
  DCHECK_GE(buf_len, 0);

  memcpy(out_buf->data(), in_buf->data() + offset, buf_len);
  UpdateDataFromEntryStat(SimpleEntryStat(base::Time::Now(), last_modified_,
                                          data_size_, sparse_data_size_));
  return buf_len;
}
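
// Stream 0 is kept entirely in memory in |stream_0_data_|;
// SimpleSynchronousEntry::Close later writes it out and computes its checksum
// (see the comment near the end of this function). A sketch of the
// non-truncating branch below, assuming a current stream size of 10 bytes:
//   write 4 bytes at offset 20 -> bytes [10, 20) are zero-filled and
//                                 data_size_[0] becomes 24.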
int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
                                    int offset,
                                    int buf_len,
                                    bool truncate) {
  // Currently, stream 0 is only used for HTTP headers, and always writes them
  // with a single, truncating write. Detect these writes and record the size
  // changes of the headers. Also, support writes to stream 0 that have
  // different access patterns, as required by the API contract.
  // All other clients of the Simple Cache are encouraged to use stream 1.
  have_written_[0] = true;
  int data_size = GetDataSize(0);
  if (offset == 0 && truncate) {
    stream_0_data_->SetCapacity(buf_len);
    memcpy(stream_0_data_->data(), buf->data(), buf_len);
    data_size_[0] = buf_len;
  } else {
    const int buffer_size =
        truncate ? offset + buf_len : std::max(offset + buf_len, data_size);
    stream_0_data_->SetCapacity(buffer_size);
    // If |stream_0_data_| was extended, the extension until offset needs to be
    // zero-filled.
    const int fill_size = offset <= data_size ? 0 : offset - data_size;
    if (fill_size > 0)
      memset(stream_0_data_->data() + data_size, 0, fill_size);
    if (buf)
      memcpy(stream_0_data_->data() + offset, buf->data(), buf_len);
    data_size_[0] = buffer_size;
  }
  RecordHeaderSize(cache_type_, data_size_[0]);
  base::Time modification_time = base::Time::Now();

  // Reset checksum; SimpleSynchronousEntry::Close will compute it for us,
  // and do it off the source creation sequence.
  crc32s_end_offset_[0] = 0;

  UpdateDataFromEntryStat(
      SimpleEntryStat(modification_time, modification_time, data_size_,
                      sparse_data_size_));
  return buf_len;
}

}  // namespace disk_cache