// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <stdint.h>

#include <memory>

#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/files/file.h"
#include "base/files/file_util.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/memory/raw_ptr.h"
#include "base/metrics/field_trial.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/task/sequenced_task_runner.h"
#include "base/task/thread_pool.h"
#include "base/test/metrics/histogram_tester.h"
#include "base/test/scoped_feature_list.h"
#include "base/test/simple_test_clock.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/traced_value.h"
#include "build/build_config.h"
#include "net/base/cache_type.h"
#include "net/base/completion_once_callback.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/request_priority.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/backend_cleanup_tracker.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/blockfile/experiments.h"
#include "net/disk_cache/blockfile/histogram_macros.h"
#include "net/disk_cache/blockfile/mapped_file.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_cache_test_base.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/memory/mem_backend_impl.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_histogram_enums.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/test/gtest_util.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

using disk_cache::EntryResult;
using net::test::IsError;
using net::test::IsOk;
using testing::ByRef;
using testing::Contains;
using testing::Eq;
using testing::Field;

#if BUILDFLAG(IS_WIN)
#include <windows.h>

#include "base/win/scoped_handle.h"
#endif

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_

// TODO(crbug.com/949811): Fix memory leaks in tests and re-enable on LSAN.
#ifdef LEAK_SANITIZER
#define MAYBE_BlockFileOpenOrCreateEntry DISABLED_BlockFileOpenOrCreateEntry
#define MAYBE_NonEmptyCorruptSimpleCacheDoesNotRecover \
  DISABLED_NonEmptyCorruptSimpleCacheDoesNotRecover
#define MAYBE_SimpleOpenOrCreateEntry DISABLED_SimpleOpenOrCreateEntry
#else
#define MAYBE_BlockFileOpenOrCreateEntry BlockFileOpenOrCreateEntry
#define MAYBE_NonEmptyCorruptSimpleCacheDoesNotRecover \
  NonEmptyCorruptSimpleCacheDoesNotRecover
#define MAYBE_SimpleOpenOrCreateEntry SimpleOpenOrCreateEntry
#endif

using base::Time;

namespace {

const char kExistingEntryKey[] = "existing entry key";

std::unique_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
    const base::FilePath& cache_path) {
  net::TestCompletionCallback cb;

  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(cache_path,
                                                /* cleanup_tracker = */ nullptr,
                                                /* cache_thread = */ nullptr,
                                                net::DISK_CACHE,
                                                /* net_log = */ nullptr));
  cache->Init(cb.callback());
  if (cb.WaitForResult() != net::OK)
    return nullptr;

  TestEntryResultCompletionCallback cb2;
  EntryResult result =
      cache->CreateEntry(kExistingEntryKey, net::HIGHEST, cb2.callback());
  result = cb2.GetResult(std::move(result));
  if (result.net_error() != net::OK)
    return nullptr;

  return cache;
}
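// Note: unlike most helpers here, CreateExistingEntryCache() builds a
// blockfile BackendImpl directly rather than going through the
// DiskCacheTestWithCache fixture. It seeds the cache at |cache_path| with a
// single entry keyed |kExistingEntryKey|, so a test can later reopen the same
// directory and check that the entry survived.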
#if BUILDFLAG(IS_FUCHSIA)
// Load tests with large numbers of file descriptors perform poorly on
// virtualized test execution environments.
// TODO(807882): Remove this workaround when virtualized test performance
// improves.
const int kLargeNumEntries = 100;
#else
const int kLargeNumEntries = 512;
#endif

}  // namespace
// Tests that can run with different types of caches.
class DiskCacheBackendTest : public DiskCacheTestWithCache {
 protected:
  // Some utility methods:

  // Perform IO operations on the cache until there is pending IO.
  int GeneratePendingIO(net::TestCompletionCallback* cb);

  // Adds 5 sparse entries. If not null, |doomed_start| and |doomed_end| are
  // filled with times to be used by DoomEntriesSince and DoomEntriesBetween.
  // There are 4 entries after |doomed_start| and 2 after |doomed_end|.
  void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);

  bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
  bool EnumerateAndMatchKeys(int max_to_open,
                             TestIterator* iter,
                             std::set<std::string>* keys_to_match,
                             size_t* count);

  // Computes the expected size of entry metadata, i.e. the total size without
  // the actual data stored. This depends only on the entry's |key| size.
  int GetEntryMetadataSize(std::string key);

  // The Simple Backend only tracks the approximate sizes of entries. This
  // rounds the exact size appropriately.
  int GetRoundedSize(int exact_size);

  // Creates an entry with the given key, populates it with
  // CacheTestFillBuffer, and checks that the write succeeded.
  void CreateKeyAndCheck(disk_cache::Backend* cache, std::string key);

  // For the simple cache, waits until indexing has occurred and checks that
  // it completed successfully.
  void WaitForSimpleCacheIndexAndCheck(disk_cache::Backend* cache);

  // Runs all of the task runners until idle; this covers the cache worker
  // pools.
  void RunUntilIdle();

  // Actual tests:
  void BackendBasics();
  void BackendKeying();
  void BackendShutdownWithPendingFileIO(bool fast);
  void BackendShutdownWithPendingIO(bool fast);
  void BackendShutdownWithPendingCreate(bool fast);
  void BackendShutdownWithPendingDoom();
  void BackendSetSize();
  void BackendLoad();
  void BackendChain();
  void BackendValidEntry();
  void BackendInvalidEntry();
  void BackendInvalidEntryRead();
  void BackendInvalidEntryWithLoad();
  void BackendTrimInvalidEntry();
  void BackendTrimInvalidEntry2();
  void BackendEnumerations();
  void BackendEnumerations2();
  void BackendDoomMidEnumeration();
  void BackendInvalidEntryEnumeration();
  void BackendFixEnumerators();
  void BackendDoomRecent();
  void BackendDoomBetween();
  void BackendCalculateSizeOfAllEntries();
  void BackendCalculateSizeOfEntriesBetween(
      bool expect_access_time_range_comparisons);
  void BackendTransaction(const std::string& name, int num_entries, bool load);
  void BackendRecoverInsert();
  void BackendRecoverRemove();
  void BackendRecoverWithEviction();
  void BackendInvalidEntry2();
  void BackendInvalidEntry3();
  void BackendInvalidEntry7();
  void BackendInvalidEntry8();
  void BackendInvalidEntry9(bool eviction);
  void BackendInvalidEntry10(bool eviction);
  void BackendInvalidEntry11(bool eviction);
  void BackendTrimInvalidEntry12();
  void BackendDoomAll();
  void BackendDoomAll2();
  void BackendInvalidRankings();
  void BackendInvalidRankings2();
  void BackendDisable();
  void BackendDisable2();
  void BackendDisable3();
  void BackendDisable4();
  void BackendDisabledAPI();
  void BackendEviction();
  void BackendOpenOrCreateEntry();
  void BackendDeadOpenNextEntry();
  void BackendIteratorConcurrentDoom();
  void BackendValidateMigrated();
};
void DiskCacheBackendTest::CreateKeyAndCheck(disk_cache::Backend* cache,
                                             std::string key) {
  const int kBufSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);
  TestEntryResultCompletionCallback cb_entry;
  disk_cache::EntryResult result =
      cache->CreateEntry(key, net::HIGHEST, cb_entry.callback());
  result = cb_entry.GetResult(std::move(result));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();
  RunUntilIdle();
}

void DiskCacheBackendTest::WaitForSimpleCacheIndexAndCheck(
    disk_cache::Backend* cache) {
  net::TestCompletionCallback wait_for_index_cb;
  static_cast<disk_cache::SimpleBackendImpl*>(cache)->index()->ExecuteWhenReady(
      wait_for_index_cb.callback());
  int rv = wait_for_index_cb.WaitForResult();
  ASSERT_THAT(rv, IsOk());
  RunUntilIdle();
}

void DiskCacheBackendTest::RunUntilIdle() {
  DiskCacheTestWithCache::RunUntilIdle();
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
}
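// Note: the RunUntilIdle() override above drains three places where work can
// hide: the fixture's own task runners, the current thread's run loop, and
// the dedicated blockfile cache thread. An operation may hop between these
// before completing, which is why all three are flushed.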
int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
  if (!use_current_thread_ && !simple_cache_mode_) {
    ADD_FAILURE();
    return net::ERR_FAILED;
  }

  TestEntryResultCompletionCallback create_cb;
  EntryResult entry_result;
  entry_result =
      cache_->CreateEntry("some key", net::HIGHEST, create_cb.callback());
  entry_result = create_cb.GetResult(std::move(entry_result));
  if (entry_result.net_error() != net::OK)
    return net::ERR_CACHE_CREATE_FAILURE;
  disk_cache::Entry* entry = entry_result.ReleaseEntry();

  const int kSize = 25000;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  int rv = net::OK;
  for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
    // We are using the current thread as the cache thread because we want to
    // be able to call this method directly, to make sure that it is the OS
    // (instead of us switching threads) that returns IO pending.
    if (!simple_cache_mode_) {
      rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
          0, i, buffer.get(), kSize, cb->callback(), false);
    } else {
      rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
    }
    if (rv == net::ERR_IO_PENDING)
      break;
    if (rv != kSize)
      rv = net::ERR_FAILED;
  }

  // Don't call Close() to avoid going through the queue or we'll deadlock
  // waiting for the operation to finish.
  if (!simple_cache_mode_)
    static_cast<disk_cache::EntryImpl*>(entry)->Release();
  else
    entry->Close();

  return rv;
}
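// Note on the contract above: callers treat net::ERR_IO_PENDING as success
// ("pending IO was generated; |cb| will eventually fire"), kSize as "all
// writes completed synchronously", and anything else as a failure. The
// shutdown tests below rely on exactly this to decide whether a completion
// callback is allowed to run after the backend is destroyed.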
void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
                                           base::Time* doomed_end) {
  InitCache();

  const int kSize = 50;
  // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
  const int kOffset = 10 + 1024 * 1024;

  disk_cache::Entry* entry0 = nullptr;
  disk_cache::Entry* entry1 = nullptr;
  disk_cache::Entry* entry2 = nullptr;

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  ASSERT_THAT(CreateEntry("zeroth", &entry0), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
  entry0->Close();
  FlushQueueForTest();
  AddDelay();
  if (doomed_start)
    *doomed_start = base::Time::Now();

  // Order in rankings list:
  // first_part1, first_part2, second_part1, second_part2
  ASSERT_THAT(CreateEntry("first", &entry1), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
  entry1->Close();

  ASSERT_THAT(CreateEntry("second", &entry2), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
  entry2->Close();
  FlushQueueForTest();
  AddDelay();
  if (doomed_end)
    *doomed_end = base::Time::Now();

  // Order in rankings list:
  // third_part1, fourth_part1, third_part2, fourth_part2
  disk_cache::Entry* entry3 = nullptr;
  disk_cache::Entry* entry4 = nullptr;
  ASSERT_THAT(CreateEntry("third", &entry3), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
  ASSERT_THAT(CreateEntry("fourth", &entry4), IsOk());
  ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
  entry3->Close();
  entry4->Close();
  FlushQueueForTest();
  AddDelay();
}
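// Note: the "_part1"/"_part2" names in the rankings comments above refer to
// the two pieces each sparse entry ends up as: because kOffset is deliberately
// larger than MemEntryImpl::kMaxSparseEntrySize, the second write cannot land
// in the same sparse child as the first, so every entry contributes two nodes
// to the rankings list, and their interleaving follows the write order.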
// Creates entries based on random keys. Stores these keys in |key_pool|.
bool DiskCacheBackendTest::CreateSetOfRandomEntries(
    std::set<std::string>* key_pool) {
  const int kNumEntries = 10;
  const int initial_entry_count = cache_->GetEntryCount();

  for (int i = 0; i < kNumEntries; ++i) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    if (CreateEntry(key, &entry) != net::OK) {
      return false;
    }
    key_pool->insert(key);
    entry->Close();
  }
  return key_pool->size() ==
         static_cast<size_t>(cache_->GetEntryCount() - initial_entry_count);
}
// Performs iteration over the backend and checks that the keys of entries
// opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
// will be opened, if it is non-negative. Otherwise, iteration will continue
// until OpenNextEntry stops returning net::OK.
bool DiskCacheBackendTest::EnumerateAndMatchKeys(
    int max_to_open,
    TestIterator* iter,
    std::set<std::string>* keys_to_match,
    size_t* count) {
  disk_cache::Entry* entry;

  if (!iter)
    return false;
  while (iter->OpenNextEntry(&entry) == net::OK) {
    if (!entry)
      return false;
    EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
    entry->Close();
    ++(*count);
    if (max_to_open >= 0 && static_cast<int>(*count) >= max_to_open)
      break;
  }

  return true;
}
int DiskCacheBackendTest::GetEntryMetadataSize(std::string key) {
  // For blockfile and memory backends, it is just the key size.
  if (!simple_cache_mode_)
    return key.size();

  // For the simple cache, we must add the file header and EOF, and that for
  // every stream.
  return disk_cache::kSimpleEntryStreamCount *
         (sizeof(disk_cache::SimpleFileHeader) +
          sizeof(disk_cache::SimpleFileEOF) + key.size());
}

int DiskCacheBackendTest::GetRoundedSize(int exact_size) {
  if (!simple_cache_mode_)
    return exact_size;

  return (exact_size + 255) & 0xFFFFFF00;
}
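// Worked example for the rounding above: in simple-cache mode the size is
// rounded up to the next multiple of 256 bytes, so GetRoundedSize(1000)
// == (1255 & 0xFFFFFF00) == 1024, while GetRoundedSize(1024) stays 1024.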
void DiskCacheBackendTest::BackendBasics() {
  InitCache();
  disk_cache::Entry *entry1 = nullptr, *entry2 = nullptr;
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_THAT(CreateEntry("the first key", &entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  entry1->Close();
  entry1 = nullptr;

  ASSERT_THAT(OpenEntry("the first key", &entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  entry1->Close();
  entry1 = nullptr;

  EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_THAT(OpenEntry("the first key", &entry1), IsOk());
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  ASSERT_THAT(CreateEntry("some other key", &entry2), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  ASSERT_TRUE(nullptr != entry2);
  EXPECT_EQ(2, cache_->GetEntryCount());

  disk_cache::Entry* entry3 = nullptr;
  ASSERT_THAT(OpenEntry("some other key", &entry3), IsOk());
  ASSERT_TRUE(nullptr != entry3);
  EXPECT_TRUE(entry2 == entry3);

  EXPECT_THAT(DoomEntry("some other key"), IsOk());
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry1->Close();
  entry2->Close();
  entry3->Close();

  EXPECT_THAT(DoomEntry("the first key"), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());

  ASSERT_THAT(CreateEntry("the first key", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("some other key", &entry2), IsOk());
  entry1->Doom();
  entry1->Close();
  EXPECT_THAT(DoomEntry("some other key"), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());
  entry2->Close();
}
TEST_F(DiskCacheBackendTest, Basics) {
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
  SetNewEviction();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
  SetMemoryOnlyMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, AppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
  SetCacheType(net::SHADER_CACHE);
  BackendBasics();
}
void DiskCacheBackendTest::BackendKeying() {
  InitCache();
  const char kName1[] = "the first key";
  const char kName2[] = "the first Key";
  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry(kName1, &entry1), IsOk());

  ASSERT_THAT(CreateEntry(kName2, &entry2), IsOk());
  EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
  entry2->Close();

  char buffer[30];
  base::strlcpy(buffer, kName1, std::size(buffer));
  ASSERT_THAT(OpenEntry(buffer, &entry2), IsOk());
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  base::strlcpy(buffer + 1, kName1, std::size(buffer) - 1);
  ASSERT_THAT(OpenEntry(buffer + 1, &entry2), IsOk());
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  base::strlcpy(buffer + 3, kName1, std::size(buffer) - 3);
  ASSERT_THAT(OpenEntry(buffer + 3, &entry2), IsOk());
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  // Now verify long keys.
  char buffer2[20000];
  memset(buffer2, 's', sizeof(buffer2));
  buffer2[1023] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
  entry2->Close();

  buffer2[1023] = 'g';
  buffer2[19999] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
  entry2->Close();
  entry1->Close();

  // Create entries with null terminator(s), and check equality. Note we create
  // the strings via the ctor instead of using literals because literals are
  // implicitly C strings which will stop at the first null terminator.
  std::string key1(4, '\0');
  key1[1] = 's';
  std::string key2(3, '\0');
  key2[1] = 's';
  ASSERT_THAT(CreateEntry(key1, &entry1), IsOk());
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  EXPECT_TRUE(entry1 != entry2) << "Different lengths";
  EXPECT_EQ(entry1->GetKey(), key1);
  EXPECT_EQ(entry2->GetKey(), key2);
  entry1->Close();
  entry2->Close();
}
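// Note on the long-key cases above: judging by the assertion messages, the
// blockfile backend keeps moderately long keys (the 1023-character one) in
// its block files, while keys too large for a block (the 19999-character one)
// spill into a separate external file; both paths must round-trip the key
// exactly.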
TEST_F(DiskCacheBackendTest, Keying) {
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
  SetNewEviction();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
  SetMemoryOnlyMode();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, AppCacheKeying) {
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
  SetCacheType(net::SHADER_CACHE);
  BackendKeying();
}
TEST_F(DiskCacheTest, CreateBackend) {
  TestBackendResultCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());

    // Test the private factory method(s).
    std::unique_ptr<disk_cache::Backend> cache;
    cache = disk_cache::MemBackendImpl::CreateBackend(0, nullptr);
    ASSERT_TRUE(cache.get());
    cache.reset();

    // Now test the public API.
    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT,
        /*file_operations=*/nullptr, cache_path_, 0,
        disk_cache::ResetHandling::kNeverReset, nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    ASSERT_THAT(rv.net_error, IsOk());
    ASSERT_TRUE(rv.backend);
    rv.backend.reset();

    rv = disk_cache::CreateCacheBackend(
        net::MEMORY_CACHE, net::CACHE_BACKEND_DEFAULT,
        /*file_operations=*/nullptr, base::FilePath(), 0,
        disk_cache::ResetHandling::kNeverReset, nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    ASSERT_THAT(rv.net_error, IsOk());
    ASSERT_TRUE(rv.backend);
    rv.backend.reset();
  }

  base::RunLoop().RunUntilIdle();
}
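// Note: the `rv = cb.GetResult(std::move(rv))` idiom used throughout this
// file handles both completion modes of CreateCacheBackend: a result that
// came back synchronously is passed through unchanged, while a net_error of
// net::ERR_IO_PENDING makes GetResult() wait for the callback's result and
// return that instead.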
TEST_F(DiskCacheTest, MemBackendPostCleanupCallback) {
  TestBackendResultCompletionCallback cb;

  net::TestClosure on_cleanup;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::MEMORY_CACHE, net::CACHE_BACKEND_DEFAULT,
      /*file_operations=*/nullptr, base::FilePath(), 0,
      disk_cache::ResetHandling::kNeverReset, nullptr, on_cleanup.closure(),
      cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);

  // The callback should be posted after the backend is destroyed.
  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(on_cleanup.have_result());

  rv.backend.reset();

  EXPECT_FALSE(on_cleanup.have_result());
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(on_cleanup.have_result());
}
TEST_F(DiskCacheTest, CreateBackendDouble) {
  // Make sure that creation of the second backend for the same path happens
  // only after the first one completes.
  TestBackendResultCompletionCallback cb, cb2;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());

  disk_cache::BackendResult rv2 = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb2.callback());

  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  EXPECT_TRUE(rv.backend);
  disk_cache::FlushCacheThreadForTesting();

  // No rv2.backend yet.
  EXPECT_EQ(net::ERR_IO_PENDING, rv2.net_error);
  EXPECT_FALSE(rv2.backend);
  EXPECT_FALSE(cb2.have_result());

  rv.backend.reset();

  // Now rv2.backend should exist.
  rv2 = cb2.GetResult(std::move(rv2));
  EXPECT_THAT(rv2.net_error, IsOk());
  EXPECT_TRUE(rv2.backend);
}
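// Note: the sequencing verified above is the job of the BackendCleanupTracker
// (included at the top of this file): a second backend for the same path is
// not allowed to initialize until the previous one has finished tearing down,
// which is why rv2 stays at net::ERR_IO_PENDING until rv.backend is reset.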
TEST_F(DiskCacheBackendTest, CreateBackendDoubleOpenEntry) {
  // Demonstrate the creation sequencing with an open entry. This is done
  // with SimpleCache since the block-file cache cancels most of its I/O on
  // destruction and blocks for what it can't cancel.

  // Don't try to sanity-check things as a blockfile cache.
  SetSimpleCacheMode();

  // Make sure that creation of the second backend for the same path happens
  // only after the first one completes, and all of its ops complete.
  TestBackendResultCompletionCallback cb, cb2;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());

  disk_cache::BackendResult rv2 = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb2.callback());

  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);
  disk_cache::FlushCacheThreadForTesting();

  // No cache 2 yet.
  EXPECT_EQ(net::ERR_IO_PENDING, rv2.net_error);
  EXPECT_FALSE(rv2.backend);
  EXPECT_FALSE(cb2.have_result());

  TestEntryResultCompletionCallback cb3;
  EntryResult entry_result =
      rv.backend->CreateEntry("key", net::HIGHEST, cb3.callback());
  entry_result = cb3.GetResult(std::move(entry_result));
  ASSERT_EQ(net::OK, entry_result.net_error());

  rv.backend.reset();

  // Still doesn't exist, since the entry from the first backend is still open.
  EXPECT_FALSE(cb2.have_result());

  entry_result.ReleaseEntry()->Close();

  // Now should exist.
  rv2 = cb2.GetResult(std::move(rv2));
  EXPECT_THAT(rv2.net_error, IsOk());
  EXPECT_TRUE(rv2.backend);
}
TEST_F(DiskCacheBackendTest, CreateBackendPostCleanup) {
  // Test for the explicit PostCleanupCallback parameter to CreateCacheBackend.

  // Extravagantly sized payload to make reproducing races easier.
  const int kBufSize = 256 * 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  SetSimpleCacheMode();
  CleanupCacheDir();

  base::RunLoop run_loop;
  TestBackendResultCompletionCallback cb;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, run_loop.QuitClosure(), cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);

  TestEntryResultCompletionCallback cb2;
  EntryResult result =
      rv.backend->CreateEntry("key", net::HIGHEST, cb2.callback());
  result = cb2.GetResult(std::move(result));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();

  rv.backend.reset();

  // Wait till the post-cleanup callback.
  run_loop.Run();

  // All of the payload should be on disk, despite stream 0 being written
  // back in the async Close().
  base::FilePath entry_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex("key", 0));
  int64_t size = 0;
  EXPECT_TRUE(base::GetFileSize(entry_path, &size));
  EXPECT_GT(size, kBufSize);
}
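// Note: EXPECT_GT (rather than EXPECT_EQ) is deliberate. Per the simple-cache
// entry format referenced by GetEntryMetadataSize() above, the on-disk file
// carries a SimpleFileHeader, the key, and a SimpleFileEOF record in addition
// to the kBufSize of payload, so the file must be strictly larger than the
// data written.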
TEST_F(DiskCacheBackendTest, SimpleCreateBackendRecoveryAppCache) {
  // Tests index recovery in APP_CACHE mode. (This is harder to test for
  // DISK_CACHE since post-cleanup callbacks aren't permitted there).
  const int kBufSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  DisableFirstCleanup();
  CleanupCacheDir();

  base::RunLoop run_loop;
  TestBackendResultCompletionCallback cb;

  // Create a backend with post-cleanup callback specified, in order to know
  // when the index has been written back (so it can be deleted race-free).
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, run_loop.QuitClosure(), cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
  ASSERT_TRUE(rv.backend);

  // Create an entry.
  TestEntryResultCompletionCallback cb2;
  disk_cache::EntryResult result =
      rv.backend->CreateEntry("key", net::HIGHEST, cb2.callback());
  result = cb2.GetResult(std::move(result));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();

  rv.backend.reset();

  // Wait till the post-cleanup callback.
  run_loop.Run();

  // Delete the index.
  base::DeleteFile(
      cache_path_.AppendASCII("index-dir").AppendASCII("the-real-index"));

  // Open the cache again. The fixture will also wait for index init.
  InitCache();

  // The entry should not have a trailer prefetch size recorded, since the
  // index can't tell what it should be when doing recovery (and it definitely
  // shouldn't interpret the last use time as such).
  EXPECT_EQ(0, simple_cache_impl_->index()->GetTrailerPrefetchSize(
                   disk_cache::simple_util::GetEntryHashKey("key")));
}
// Tests that |BackendImpl| fails to initialize with a missing file.
TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  base::FilePath filename = cache_path_.AppendASCII("data_1");
  base::DeleteFile(filename);
  net::TestCompletionCallback cb;

  // Blocking shouldn't be needed to create the cache.
  absl::optional<base::ScopedDisallowBlocking> disallow_blocking(
      absl::in_place);
  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(cache_path_, nullptr, nullptr,
                                                net::DISK_CACHE, nullptr));
  cache->Init(cb.callback());
  EXPECT_THAT(cb.WaitForResult(), IsError(net::ERR_FAILED));
  disallow_blocking.reset();

  cache.reset();
  DisableIntegrityCheck();
}
TEST_F(DiskCacheBackendTest, MemoryListensToMemoryPressure) {
  const int kLimit = 16 * 1024;
  const int kEntrySize = 256;
  SetMaxSize(kLimit);
  SetMemoryOnlyMode();
  InitCache();

  // Fill to about 80-90% full.
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kEntrySize);
  CacheTestFillBuffer(buffer->data(), kEntrySize, false);

  for (int i = 0; i < 0.9 * (kLimit / kEntrySize); ++i) {
    disk_cache::Entry* entry = nullptr;
    ASSERT_EQ(net::OK, CreateEntry(base::NumberToString(i), &entry));
    EXPECT_EQ(kEntrySize,
              WriteData(entry, 0, 0, buffer.get(), kEntrySize, true));
    entry->Close();
  }
  EXPECT_GT(CalculateSizeOfAllEntries(), 0.8 * kLimit);

  // Signal low-memory of various sorts, and see how small it gets.
  base::MemoryPressureListener::NotifyMemoryPressure(
      base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE);
  base::RunLoop().RunUntilIdle();
  EXPECT_LT(CalculateSizeOfAllEntries(), 0.5 * kLimit);

  base::MemoryPressureListener::NotifyMemoryPressure(
      base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
  base::RunLoop().RunUntilIdle();
  EXPECT_LT(CalculateSizeOfAllEntries(), 0.1 * kLimit);
}
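// Note: the RunUntilIdle() calls after each NotifyMemoryPressure() appear to
// be load-bearing; judging by the test structure, delivery of the pressure
// signal to the backend involves the current sequence's task queue, so the
// memory backend only gets a chance to shrink once the loop has spun.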
TEST_F(DiskCacheBackendTest, ExternalFiles) {
  InitCache();
  // First, let's create a file in the folder.
  base::FilePath filename = cache_path_.AppendASCII("f_000001");

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  ASSERT_EQ(kSize, base::WriteFile(filename, buffer1->data(), kSize));

  // Now let's create a file with the cache.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
  entry->Close();

  // And verify that the first file is still there.
  scoped_refptr<net::IOBuffer> buffer2(
      base::MakeRefCounted<net::IOBuffer>(kSize));
  ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
}
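// Note: judging by the test name, "f_000001" matches the naming scheme the
// blockfile backend uses for its external data files, and the zero-length
// write that extends stream 0 to 20000 bytes (past what fits in a block file)
// forces the entry onto such a file. The checks above thus verify that the
// backend does not clobber a pre-existing file that happens to use that name.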
// Tests that we deal with file-level pending operations at destruction time.
void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
  ASSERT_TRUE(CleanupCacheDir());
  uint32_t flags = disk_cache::kNoBuffering;
  if (!fast)
    flags |= disk_cache::kNoRandom;

  if (!simple_cache_mode_)
    UseCurrentThread();
  CreateBackend(flags);

  net::TestCompletionCallback cb;
  int rv = GeneratePendingIO(&cb);

  // The cache destructor will see one pending operation here.
  cache_.reset();

  if (rv == net::ERR_IO_PENDING) {
    if (fast || simple_cache_mode_)
      EXPECT_FALSE(cb.have_result());
    else
      EXPECT_TRUE(cb.have_result());
  }

  base::RunLoop().RunUntilIdle();

#if !BUILDFLAG(IS_IOS)
  // Wait for the actual operation to complete, or we'll keep a file handle
  // that may cause issues later. Note that on iOS systems even though this
  // test uses a single thread, the actual IO is posted to a worker thread and
  // the cache destructor breaks the link to reach cb when the operation
  // completes.
  rv = cb.GetResult(rv);
#endif
}
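// Note: the expectations above encode the two shutdown policies. In slow mode
// (!fast, so kNoRandom is set) the blockfile destructor blocks until pending
// file IO finishes, so the callback has a result by the time cache_.reset()
// returns; in fast mode, and always for the simple cache, shutdown abandons
// the operation and the callback must not have fired.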

TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
  BackendShutdownWithPendingFileIO(false);
}

// Here and below, tests that simulate crashes are not compiled in
// LeakSanitizer builds because they contain a lot of intentional memory
// leaks.
#if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we
  // don't force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingFileIO(true);
}
#endif

// See crbug.com/330074
#if !BUILDFLAG(IS_IOS)
// Tests that one cache instance is not affected by another one going away.
TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
  base::ScopedTempDir store;
  ASSERT_TRUE(store.CreateUniqueTempDir());

  net::TestCompletionCallback cb;
  TestBackendResultCompletionCallback create_cb;
  disk_cache::BackendResult backend_rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      store.GetPath(), 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, create_cb.callback());
  backend_rv = create_cb.GetResult(std::move(backend_rv));
  ASSERT_THAT(backend_rv.net_error, IsOk());
  ASSERT_TRUE(backend_rv.backend);
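
  // Now set up the backend under test (cache_) in its own directory,
  // independent of |store|.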
  ASSERT_TRUE(CleanupCacheDir());
  SetNewEviction();  // Match the expected behavior for integrity verification.
  UseCurrentThread();

  CreateBackend(disk_cache::kNoBuffering);
  int rv = GeneratePendingIO(&cb);

  // cache_ has a pending operation, and backend_rv.backend will go away.
  backend_rv.backend.reset();

  if (rv == net::ERR_IO_PENDING)
    EXPECT_FALSE(cb.have_result());

  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Wait for the actual operation to complete, or we'll keep a file handle
  // that may cause issues later.
  rv = cb.GetResult(rv);
}
#endif

// Tests that we deal with background-thread pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
  TestEntryResultCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());

    uint32_t flags = disk_cache::kNoBuffering;
    if (!fast)
      flags |= disk_cache::kNoRandom;

    CreateBackend(flags);

    EntryResult result =
        cache_->CreateEntry("some key", net::HIGHEST, cb.callback());
    result = cb.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());

    result.ReleaseEntry()->Close();

    // The cache destructor will see one pending operation here.
    cache_.reset();
  }

  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(cb.have_result());
}

TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
  BackendShutdownWithPendingIO(false);
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we
  // don't force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingIO(true);
}
#endif

// Tests that we deal with create-type pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
  TestEntryResultCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());

    disk_cache::BackendFlags flags =
        fast ? disk_cache::kNone : disk_cache::kNoRandom;
    CreateBackend(flags);

    EntryResult result =
        cache_->CreateEntry("some key", net::HIGHEST, cb.callback());
    ASSERT_THAT(result.net_error(), IsError(net::ERR_IO_PENDING));

    cache_.reset();
    EXPECT_FALSE(cb.have_result());
  }

  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(cb.have_result());
}

TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
  BackendShutdownWithPendingCreate(false);
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking an entry from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we
  // don't force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingCreate(true);
}
#endif

void DiskCacheBackendTest::BackendShutdownWithPendingDoom() {
  net::TestCompletionCallback cb;
  {
    ASSERT_TRUE(CleanupCacheDir());

    disk_cache::BackendFlags flags = disk_cache::kNoRandom;
    CreateBackend(flags);

    TestEntryResultCompletionCallback cb2;
    EntryResult result =
        cache_->CreateEntry("some key", net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();
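
    // Queue a doom that will still be pending when the backend is destroyed.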
    int rv = cache_->DoomEntry("some key", net::HIGHEST, cb.callback());
    ASSERT_THAT(rv, IsError(net::ERR_IO_PENDING));

    cache_.reset();
    EXPECT_FALSE(cb.have_result());
  }

  base::RunLoop().RunUntilIdle();
  EXPECT_FALSE(cb.have_result());
}

TEST_F(DiskCacheBackendTest, ShutdownWithPendingDoom) {
  BackendShutdownWithPendingDoom();
}

// Disabled on Android since this test requires the cache creator to create
// blockfile caches.
#if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheTest, TruncatedIndex) {
  ASSERT_TRUE(CleanupCacheDir());
  base::FilePath index = cache_path_.AppendASCII("index");
  ASSERT_EQ(5, base::WriteFile(index, "hello", 5));
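
  // A five-byte index is far too small to be a valid blockfile index, so
  // backend creation should fail rather than use the truncated file.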
  TestBackendResultCompletionCallback cb;

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
      /*file_operations=*/nullptr, cache_path_, 0,
      disk_cache::ResetHandling::kNeverReset, /*net_log=*/nullptr,
      cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_NE(net::OK, rv.net_error);
  ASSERT_FALSE(rv.backend);
}
#endif

void DiskCacheBackendTest::BackendSetSize() {
  const int cache_size = 0x10000;  // 64 kB
  SetMaxSize(cache_size);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(cache_size);
  memset(buffer->data(), 0, cache_size);
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
      << "normal file";

  EXPECT_EQ(net::ERR_FAILED,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
      << "file size above the limit";

  // By doubling the total size, we make this file cacheable.
  SetMaxSize(cache_size * 2);
  EXPECT_EQ(cache_size / 5,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));

  // Let's fill up the cache!
  SetMaxSize(cache_size * 10);
  EXPECT_EQ(cache_size * 3 / 4,
            WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
  entry->Close();
  FlushQueueForTest();

  SetMaxSize(cache_size);

  // The cache is 95% full.
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));

  disk_cache::Entry* entry2;
  ASSERT_THAT(CreateEntry("an extra key", &entry2), IsOk());
  EXPECT_EQ(cache_size / 10,
            WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
  entry2->Close();  // This will trigger the cache trim.

  EXPECT_NE(net::OK, OpenEntry(first, &entry2));

  FlushQueueForTest();  // Make sure that we are done trimming the cache.
  FlushQueueForTest();  // We may have posted two tasks to evict stuff.

  entry->Close();
  ASSERT_THAT(OpenEntry(second, &entry), IsOk());
  EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, SetSize) {
  BackendSetSize();
}

TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
  SetNewEviction();
  BackendSetSize();
}

TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
  SetMemoryOnlyMode();
  BackendSetSize();
}

void DiskCacheBackendTest::BackendLoad() {
  InitCache();
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  disk_cache::Entry* entries[kLargeNumEntries];
  for (auto*& entry : entries) {
    std::string key = GenerateKey(true);
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  }
  EXPECT_EQ(kLargeNumEntries, cache_->GetEntryCount());
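
  // Shuffle the open entries so that they are reopened and doomed in a
  // random order.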
  for (int i = 0; i < kLargeNumEntries; i++) {
    int source1 = rand() % kLargeNumEntries;
    int source2 = rand() % kLargeNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  for (auto* entry : entries) {
    disk_cache::Entry* new_entry;
    ASSERT_THAT(OpenEntry(entry->GetKey(), &new_entry), IsOk());
    EXPECT_TRUE(new_entry == entry);
    new_entry->Close();
    entry->Doom();
    entry->Close();
  }
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, Load) {
  // Work with a tiny index table (16 entries).
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
  SetNewEviction();
  // Work with a tiny index table (16 entries).
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
  SetMaxSize(0x100000);
  SetMemoryOnlyMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, AppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  // Work with a tiny index table (16 entries).
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
  SetCacheType(net::SHADER_CACHE);
  // Work with a tiny index table (16 entries).
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

// Tests the chaining of an entry to the current head.
void DiskCacheBackendTest::BackendChain() {
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("The first key", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("The Second key", &entry), IsOk());
  entry->Close();
}

TEST_F(DiskCacheBackendTest, Chain) {
  BackendChain();
}

TEST_F(DiskCacheBackendTest, NewEvictionChain) {
  SetNewEviction();
  BackendChain();
}

TEST_F(DiskCacheBackendTest, AppCacheChain) {
  SetCacheType(net::APP_CACHE);
  BackendChain();
}

TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
  SetCacheType(net::SHADER_CACHE);
  BackendChain();
}

TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
  SetNewEviction();
  InitCache();

  disk_cache::Entry* entry;
  for (int i = 0; i < 100; i++) {
    std::string name(base::StringPrintf("Key %d", i));
    ASSERT_THAT(CreateEntry(name, &entry), IsOk());
    entry->Close();
    if (i < 90) {
      // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
      ASSERT_THAT(OpenEntry(name, &entry), IsOk());
      entry->Close();
    }
  }

  // The first eviction must come from list 1 (10% limit), the second must come
  // from list 0.
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));

  // Double-check that we still have the list tails.
  ASSERT_THAT(OpenEntry("Key 1", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry("Key 91", &entry), IsOk());
  entry->Close();
}

// Before looking for invalid entries, let's check a valid entry.
void DiskCacheBackendTest::BackendValidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
  entry->Close();
  SimulateCrash();

  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  memset(buffer2->data(), 0, kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
  entry->Close();
  EXPECT_STREQ(buffer1->data(), buffer2->data());
}

TEST_F(DiskCacheBackendTest, ValidEntry) {
  BackendValidEntry();
}

TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
  SetNewEviction();
  BackendValidEntry();
}

// The same logic as the previous test (ValidEntry), but this time force the
// entry to be invalid, simulating a crash in the middle.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  SimulateCrash();

  EXPECT_NE(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(0, cache_->GetEntryCount());
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntry) {
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
  SetNewEviction();
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntry();
}

// Almost the same test, but this time crash the cache after reading an entry.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryRead() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();

  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));

  SimulateCrash();

  if (type_ == net::APP_CACHE) {
    // Reading an entry and crashing should not make it dirty.
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    EXPECT_EQ(1, cache_->GetEntryCount());
    entry->Close();
  } else {
    EXPECT_NE(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
  SetNewEviction();
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
  // Work with a tiny index table (16 entries).
  SetMask(0xf);
  SetMaxSize(0x100000);
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 100;
  disk_cache::Entry* entries[kNumEntries];
  for (auto*& entry : entries) {
    std::string key = GenerateKey(true);
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  for (int i = 0; i < kNumEntries; i++) {
    int source1 = rand() % kNumEntries;
    int source2 = rand() % kNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  std::string keys[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    keys[i] = entries[i]->GetKey();
    if (i < kNumEntries / 2)
      entries[i]->Close();
  }

  SimulateCrash();
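
  // The first half was closed cleanly; the second half was still open at
  // "crash" time, so those entries should be dirty and unreachable.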
  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
    disk_cache::Entry* entry;
    EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
  }

  for (int i = 0; i < kNumEntries / 2; i++) {
    disk_cache::Entry* entry;
    ASSERT_THAT(OpenEntry(keys[i], &entry), IsOk());
    entry->Close();
  }

  EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
  SetNewEviction();
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendTrimInvalidEntry() {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  memset(buffer->data(), 0, kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  // Simulate a crash.
  SimulateCrash();

  ASSERT_THAT(CreateEntry(second, &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  EXPECT_EQ(2, cache_->GetEntryCount());
  SetMaxSize(kSize);
  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // If we evicted the entry in less than 20 ms, we have one entry in the
  // cache; if it took more than that, we posted a task and we'll delete the
  // second entry too.
  base::RunLoop().RunUntilIdle();

  // This may not be thread-safe in general, but for now it's OK, so add some
  // ThreadSanitizer annotations to ignore data races on cache_.
  // See http://crbug.com/55970
  ANNOTATE_IGNORE_READS_BEGIN();
  EXPECT_GE(1, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_END();

  EXPECT_NE(net::OK, OpenEntry(first, &entry));
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
  BackendTrimInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
  SetNewEviction();
  BackendTrimInvalidEntry();
}

// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
  SetMask(0xf);  // 16-entry table.

  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 40);
  InitCache();

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  memset(buffer->data(), 0, kSize);
  disk_cache::Entry* entry;

  // Writing 32 entries to this cache chains most of them.
  for (int i = 0; i < 32; i++) {
    std::string key(base::StringPrintf("some key %d", i));
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
    entry->Close();
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    // Note that we are not closing the entries.
  }

  // Simulate a crash.
  SimulateCrash();

  ASSERT_THAT(CreateEntry("Something else", &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  FlushQueueForTest();
  EXPECT_EQ(33, cache_->GetEntryCount());
  SetMaxSize(kSize);

  // For the new eviction code, all corrupt entries are on the second list so
  // they are not going away that easily.
  if (new_eviction_) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
  }

  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // We may abort the eviction before cleaning up everything.
  base::RunLoop().RunUntilIdle();
  FlushQueueForTest();
  // If it's not clear enough: we may still have eviction tasks running at this
  // time, so the number of entries is changing while we read it.
  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  EXPECT_GE(30, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_AND_WRITES_END();

  // For extra messiness, the integrity check for the cache can actually cause
  // evictions if it's over-capacity, which would race with the above. So
  // change the size we pass to CheckCacheIntegrity (but don't mess with the
  // existing backend's state).
  size_ = 0;
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
  BackendTrimInvalidEntry2();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
  SetNewEviction();
  BackendTrimInvalidEntry2();
}
#endif  // !defined(LEAK_SANITIZER)

void DiskCacheBackendTest::BackendEnumerations() {
  InitCache();
  Time initial = Time::Now();

  const int kNumEntries = 100;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
  Time final = Time::Now();

  disk_cache::Entry* entry;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  Time last_modified[kNumEntries];
  Time last_used[kNumEntries];
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(nullptr != entry);
    if (count < kNumEntries) {
      last_modified[count] = entry->GetLastModified();
      last_used[count] = entry->GetLastUsed();
      EXPECT_TRUE(initial <= last_modified[count]);
      EXPECT_TRUE(final >= last_modified[count]);
    }

    entry->Close();
    count++;
  }
  EXPECT_EQ(kNumEntries, count);

  iter = CreateIterator();
  count = 0;
  // The previous enumeration should not have changed the timestamps.
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(nullptr != entry);
    if (count < kNumEntries) {
      EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
      EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
    }
    entry->Close();
    count++;
  }
  EXPECT_EQ(kNumEntries, count);
}

TEST_F(DiskCacheBackendTest, Enumerations) {
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
  SetNewEviction();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
  SetMemoryOnlyMode();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations();
}

// Verifies enumerations while entries are open.
void DiskCacheBackendTest::BackendEnumerations2() {
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry(first, &entry1), IsOk());
  entry1->Close();
  ASSERT_THAT(CreateEntry(second, &entry2), IsOk());
  entry2->Close();
  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();
  ASSERT_THAT(OpenEntry(second, &entry1), IsOk());
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
  EXPECT_EQ(entry2->GetKey(), second);

  // Two entries and the iterator pointing at "first".
  entry1->Close();
  entry2->Close();

  // The iterator should still be valid, so we should not crash.
  ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
  EXPECT_EQ(entry2->GetKey(), first);
  entry2->Close();
  iter = CreateIterator();

  // Modify the oldest entry and get the newest element.
  ASSERT_THAT(OpenEntry(first, &entry1), IsOk());
  EXPECT_EQ(0, WriteData(entry1, 0, 200, nullptr, 0, false));
  ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
  if (type_ == net::APP_CACHE) {
    // The list is not updated.
    EXPECT_EQ(entry2->GetKey(), second);
  } else {
    EXPECT_EQ(entry2->GetKey(), first);
  }

  entry1->Close();
  entry2->Close();
}

TEST_F(DiskCacheBackendTest, Enumerations2) {
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
  SetNewEviction();
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations2();
}

void DiskCacheBackendTest::BackendDoomMidEnumeration() {
  InitCache();

  const int kNumEntries = 100;
  std::set<std::string> keys;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    keys.insert(key);
    disk_cache::Entry* entry;
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    entry->Close();
  }

  disk_cache::Entry* entry;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  while (iter->OpenNextEntry(&entry) == net::OK) {
    if (count == 0) {
      // Delete a random entry from the cache while in the midst of iteration.
      auto key_to_doom = keys.begin();
      while (*key_to_doom == entry->GetKey())
        key_to_doom++;
      ASSERT_THAT(DoomEntry(*key_to_doom), IsOk());
      ASSERT_EQ(1u, keys.erase(*key_to_doom));
    }
    ASSERT_NE(nullptr, entry);
    EXPECT_EQ(1u, keys.erase(entry->GetKey()));
    entry->Close();
    count++;
  }

  EXPECT_EQ(kNumEntries - 1, cache_->GetEntryCount());
  EXPECT_EQ(0u, keys.size());
}

TEST_F(DiskCacheBackendTest, DoomEnumerations) {
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomEnumerations) {
  SetNewEviction();
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEnumerations) {
  SetMemoryOnlyMode();
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, ShaderCacheDoomEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, AppCacheDoomEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendDoomMidEnumeration();
}

TEST_F(DiskCacheBackendTest, SimpleDoomEnumerations) {
  SetSimpleCacheMode();
  BackendDoomMidEnumeration();
}

// Verify that ReadData calls do not update the LRU cache
// when using the SHADER_CACHE type.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);

  ASSERT_THAT(CreateEntry(first, &entry1), IsOk());
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));

  ASSERT_THAT(CreateEntry(second, &entry2), IsOk());
  entry2->Close();
  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();

  // Read from the last item in the LRU.
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
  entry1->Close();

  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry2), IsOk());
  EXPECT_EQ(entry2->GetKey(), second);
  entry2->Close();
}

#if !defined(LEAK_SANITIZER)
// Verify handling of invalid entries while doing enumerations.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry *entry, *entry1, *entry2;
  ASSERT_THAT(CreateEntry(key, &entry1), IsOk());

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
  entry1->Close();
  ASSERT_THAT(OpenEntry(key, &entry1), IsOk());
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));

  std::string key2("Another key");
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  entry2->Close();
  ASSERT_EQ(2, cache_->GetEntryCount());

  SimulateCrash();

  std::unique_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(nullptr != entry);
    EXPECT_EQ(key2, entry->GetKey());
    entry->Close();
    count++;
  }
  EXPECT_EQ(1, count);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
  BackendInvalidEntryEnumeration();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
  SetNewEviction();
  BackendInvalidEntryEnumeration();
}
#endif  // !defined(LEAK_SANITIZER)

// Tests that if for some reason entries are modified close to existing cache
// iterators, we don't generate fatal errors or reset the cache.
void DiskCacheBackendTest::BackendFixEnumerators() {
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 10;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter1 = CreateIterator(),
                                iter2 = CreateIterator();
  ASSERT_THAT(iter1->OpenNextEntry(&entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  entry1->Close();
  entry1 = nullptr;

  // Let's go to the middle of the list.
  for (int i = 0; i < kNumEntries / 2; i++) {
    if (entry1)
      entry1->Close();
    ASSERT_THAT(iter1->OpenNextEntry(&entry1), IsOk());
    ASSERT_TRUE(nullptr != entry1);

    ASSERT_THAT(iter2->OpenNextEntry(&entry2), IsOk());
    ASSERT_TRUE(nullptr != entry2);
    entry2->Close();
  }

  // Messing with entry1 will modify entry2->next.
  entry1->Doom();
  ASSERT_THAT(iter2->OpenNextEntry(&entry2), IsOk());
  ASSERT_TRUE(nullptr != entry2);

  // The link entry2->entry1 should be broken.
  EXPECT_NE(entry2->GetKey(), entry1->GetKey());
  entry1->Close();
  entry2->Close();

  // And the second iterator should keep working.
  ASSERT_THAT(iter2->OpenNextEntry(&entry2), IsOk());
  ASSERT_TRUE(nullptr != entry2);
  entry2->Close();
}

TEST_F(DiskCacheBackendTest, FixEnumerators) {
  BackendFixEnumerators();
}

TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
  SetNewEviction();
  BackendFixEnumerators();
}

void DiskCacheBackendTest::BackendDoomRecent() {
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("second", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle = Time::Now();

  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("fourth", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
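
  // Nothing was created after |final|, so dooming entries since |final|
  // should remove nothing.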
  EXPECT_THAT(DoomEntriesSince(final), IsOk());
  ASSERT_EQ(4, cache_->GetEntryCount());

  EXPECT_THAT(DoomEntriesSince(middle), IsOk());
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_THAT(OpenEntry("second", &entry), IsOk());
  entry->Close();
}

TEST_F(DiskCacheBackendTest, DoomRecent) {
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
  SetNewEviction();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
  SetMemoryOnlyMode();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
  SetMemoryOnlyMode();
  base::Time start;
  InitSparseCache(&start, nullptr);
  DoomEntriesSince(start);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
  base::Time start;
  InitSparseCache(&start, nullptr);
  DoomEntriesSince(start);
  // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
  // MemBackendImpl does not. That's why the expected value differs here from
  // MemoryOnlyDoomEntriesSinceSparse.
  EXPECT_EQ(3, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(nullptr, nullptr);
  EXPECT_THAT(DoomAllEntries(), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomAllSparse) {
  InitSparseCache(nullptr, nullptr);
  EXPECT_THAT(DoomAllEntries(), IsOk());
  EXPECT_EQ(0, cache_->GetEntryCount());
}

// This test is for https://crbug.com/827492.
TEST_F(DiskCacheBackendTest, InMemorySparseEvict) {
  const int kMaxSize = 512;
  SetMaxSize(kMaxSize);
  SetMemoryOnlyMode();
  InitCache();

  scoped_refptr<net::IOBuffer> buffer = base::MakeRefCounted<net::IOBuffer>(64);
  CacheTestFillBuffer(buffer->data(), 64, false /* no_nulls */);

  std::vector<disk_cache::ScopedEntryPtr> entries;

  disk_cache::Entry* entry = nullptr;
  // Create a bunch of entries.
  for (size_t i = 0; i < 14; i++) {
    std::string name = "http://www." + base::NumberToString(i) + ".com/";
    ASSERT_THAT(CreateEntry(name, &entry), IsOk());
    entries.push_back(disk_cache::ScopedEntryPtr(entry));
  }

  // Create several sparse entries and fill with enough data to
  // pass the eviction threshold.
  ASSERT_EQ(64, WriteSparseData(entries[0].get(), 0, buffer.get(), 64));
  ASSERT_EQ(net::ERR_FAILED,
            WriteSparseData(entries[0].get(), 10000, buffer.get(), 4));
  ASSERT_EQ(63, WriteSparseData(entries[1].get(), 0, buffer.get(), 63));
  ASSERT_EQ(64, WriteSparseData(entries[2].get(), 0, buffer.get(), 64));
  ASSERT_EQ(64, WriteSparseData(entries[3].get(), 0, buffer.get(), 64));

  // Close all the entries, leaving a populated LRU list
  // with all entries having refcount 0 (doom implies deletion).
  entries.clear();

  // Create a new entry, triggering the buggy eviction.
  ASSERT_THAT(CreateEntry("http://www.14.com/", &entry), IsOk());
  entry->Close();
}

void DiskCacheBackendTest::BackendDoomBetween() {
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_start = Time::Now();

  ASSERT_THAT(CreateEntry("second", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_end = Time::Now();

  ASSERT_THAT(CreateEntry("fourth", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry("fourth", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
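
  // Only "second" and "third" were created inside [middle_start, middle_end).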
  EXPECT_THAT(DoomEntriesBetween(middle_start, middle_end), IsOk());
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_THAT(OpenEntry("fourth", &entry), IsOk());
  entry->Close();

  EXPECT_THAT(DoomEntriesBetween(middle_start, final), IsOk());
  ASSERT_EQ(1, cache_->GetEntryCount());

  ASSERT_THAT(OpenEntry("first", &entry), IsOk());
  entry->Close();
}

TEST_F(DiskCacheBackendTest, DoomBetween) {
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
  SetNewEviction();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
  SetMemoryOnlyMode();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
  SetMemoryOnlyMode();
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(9, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());
}

void DiskCacheBackendTest::BackendCalculateSizeOfAllEntries() {
  InitCache();

  // The cache is initially empty.
  EXPECT_EQ(0, CalculateSizeOfAllEntries());

  // Generate random entries and populate them with data of respective
  // sizes 0, 1, ..., count - 1 bytes.
  std::set<std::string> key_pool;
  CreateSetOfRandomEntries(&key_pool);

  int count = 0;
  int total_size = 0;
  for (std::string key : key_pool) {
    std::string data(count, ' ');
    scoped_refptr<net::StringIOBuffer> buffer =
        base::MakeRefCounted<net::StringIOBuffer>(data);

    // Alternate between writing to the first two streams to test that we do
    // not take only one stream into account.
    disk_cache::Entry* entry;
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    ASSERT_EQ(count, WriteData(entry, count % 2, 0, buffer.get(), count, true));
    entry->Close();

    total_size += GetRoundedSize(count + GetEntryMetadataSize(key));
    ++count;
  }

  int result = CalculateSizeOfAllEntries();
  EXPECT_EQ(total_size, result);

  // Add another entry and test if the size is updated. Then remove it and
  // test if the size is back to the original value.
  {
    const int last_entry_size = 47;
    std::string data(last_entry_size, ' ');
    scoped_refptr<net::StringIOBuffer> buffer =
        base::MakeRefCounted<net::StringIOBuffer>(data);

    disk_cache::Entry* entry;
    std::string key = GenerateKey(true);
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    ASSERT_EQ(last_entry_size,
              WriteData(entry, 0, 0, buffer.get(), last_entry_size, true));
    entry->Close();

    int new_result = CalculateSizeOfAllEntries();
    EXPECT_EQ(
        result + GetRoundedSize(last_entry_size + GetEntryMetadataSize(key)),
        new_result);

    DoomEntry(key);
    new_result = CalculateSizeOfAllEntries();
    EXPECT_EQ(result, new_result);
  }

  // After dooming the entries, the size should be back to zero.
  ASSERT_THAT(DoomAllEntries(), IsOk());
  EXPECT_EQ(0, CalculateSizeOfAllEntries());
}

TEST_F(DiskCacheBackendTest, CalculateSizeOfAllEntries) {
  BackendCalculateSizeOfAllEntries();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyCalculateSizeOfAllEntries) {
  SetMemoryOnlyMode();
  BackendCalculateSizeOfAllEntries();
}

TEST_F(DiskCacheBackendTest, SimpleCacheCalculateSizeOfAllEntries) {
  // Use net::APP_CACHE to make size estimations deterministic via
  // non-optimistic writes.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendCalculateSizeOfAllEntries();
}

void DiskCacheBackendTest::BackendCalculateSizeOfEntriesBetween(
    bool expect_access_time_comparisons) {
  InitCache();

  EXPECT_EQ(0, CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));

  Time start = Time::Now();

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();
  base::RunLoop().RunUntilIdle();

  AddDelay();
  Time middle = Time::Now();
  AddDelay();

  ASSERT_THAT(CreateEntry("second", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("third_entry", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();
  base::RunLoop().RunUntilIdle();

  AddDelay();
  Time end = Time::Now();
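
  // The entries have no payload, so each entry's size is just its rounded
  // metadata (key) overhead.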
  int size_1 = GetRoundedSize(GetEntryMetadataSize("first"));
  int size_2 = GetRoundedSize(GetEntryMetadataSize("second"));
  int size_3 = GetRoundedSize(GetEntryMetadataSize("third_entry"));

  ASSERT_EQ(3, cache_->GetEntryCount());
  ASSERT_EQ(CalculateSizeOfAllEntries(),
            CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));

  if (expect_access_time_comparisons) {
    int start_end = CalculateSizeOfEntriesBetween(start, end);
    ASSERT_EQ(CalculateSizeOfAllEntries(), start_end);
    ASSERT_EQ(size_1 + size_2 + size_3, start_end);

    ASSERT_EQ(size_1, CalculateSizeOfEntriesBetween(start, middle));
    ASSERT_EQ(size_2 + size_3, CalculateSizeOfEntriesBetween(middle, end));
  }

  // After dooming the entries, the size should be back to zero.
  ASSERT_THAT(DoomAllEntries(), IsOk());
  EXPECT_EQ(0, CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
}

TEST_F(DiskCacheBackendTest, CalculateSizeOfEntriesBetween) {
  InitCache();
  ASSERT_EQ(net::ERR_NOT_IMPLEMENTED,
            CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
}

TEST_F(DiskCacheBackendTest, MemoryOnlyCalculateSizeOfEntriesBetween) {
  SetMemoryOnlyMode();
  BackendCalculateSizeOfEntriesBetween(true);
}

TEST_F(DiskCacheBackendTest, SimpleCacheCalculateSizeOfEntriesBetween) {
  // Test normal mode, where access-time range comparisons are supported.
  SetSimpleCacheMode();
  BackendCalculateSizeOfEntriesBetween(true);
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheCalculateSizeOfEntriesBetween) {
  // Test SimpleCache in APP_CACHE mode separately since it does not support
  // access-time range comparisons.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendCalculateSizeOfEntriesBetween(false);
}

void DiskCacheBackendTest::BackendTransaction(const std::string& name,
                                              int num_entries,
                                              bool load) {
  success_ = false;
  ASSERT_TRUE(CopyTestCache(name));
  DisableFirstCleanup();

  uint32_t mask;
  if (load) {
    mask = 0xf;
    SetMaxSize(0x100000);
  } else {
    // Clear the settings from the previous run.
    mask = 0;
    SetMaxSize(0);
  }
  SetMask(mask);

  InitCache();
  ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());

  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_NE(net::OK, OpenEntry(key, &entry1));

  int actual = cache_->GetEntryCount();
  if (num_entries != actual) {
    ASSERT_TRUE(load);
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);
  }

  cache_.reset();
  cache_impl_ = nullptr;
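
  // With the backend destroyed, validate the on-disk structures directly.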
  ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, MaxSize(), mask));
  success_ = true;
}

void DiskCacheBackendTest::BackendRecoverInsert() {
  // Tests with an empty cache.
  BackendTransaction("insert_empty1", 0, false);
  ASSERT_TRUE(success_) << "insert_empty1";
  BackendTransaction("insert_empty2", 0, false);
  ASSERT_TRUE(success_) << "insert_empty2";
  BackendTransaction("insert_empty3", 0, false);
  ASSERT_TRUE(success_) << "insert_empty3";

  // Tests with one entry on the cache.
  BackendTransaction("insert_one1", 1, false);
  ASSERT_TRUE(success_) << "insert_one1";
  BackendTransaction("insert_one2", 1, false);
  ASSERT_TRUE(success_) << "insert_one2";
  BackendTransaction("insert_one3", 1, false);
  ASSERT_TRUE(success_) << "insert_one3";

  // Tests with one hundred entries on the cache, tiny index.
  BackendTransaction("insert_load1", 100, true);
  ASSERT_TRUE(success_) << "insert_load1";
  BackendTransaction("insert_load2", 100, true);
  ASSERT_TRUE(success_) << "insert_load2";
}

TEST_F(DiskCacheBackendTest, RecoverInsert) {
  BackendRecoverInsert();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
  SetNewEviction();
  BackendRecoverInsert();
}

void DiskCacheBackendTest::BackendRecoverRemove() {
  // Removing the only element.
  BackendTransaction("remove_one1", 0, false);
  ASSERT_TRUE(success_) << "remove_one1";
  BackendTransaction("remove_one2", 0, false);
  ASSERT_TRUE(success_) << "remove_one2";
  BackendTransaction("remove_one3", 0, false);
  ASSERT_TRUE(success_) << "remove_one3";

  // Removing the head.
  BackendTransaction("remove_head1", 1, false);
  ASSERT_TRUE(success_) << "remove_head1";
  BackendTransaction("remove_head2", 1, false);
  ASSERT_TRUE(success_) << "remove_head2";
  BackendTransaction("remove_head3", 1, false);
  ASSERT_TRUE(success_) << "remove_head3";

  // Removing the tail.
  BackendTransaction("remove_tail1", 1, false);
  ASSERT_TRUE(success_) << "remove_tail1";
  BackendTransaction("remove_tail2", 1, false);
  ASSERT_TRUE(success_) << "remove_tail2";
  BackendTransaction("remove_tail3", 1, false);
  ASSERT_TRUE(success_) << "remove_tail3";

  // Removing with one hundred entries on the cache, tiny index.
  BackendTransaction("remove_load1", 100, true);
  ASSERT_TRUE(success_) << "remove_load1";
  BackendTransaction("remove_load2", 100, true);
  ASSERT_TRUE(success_) << "remove_load2";
  BackendTransaction("remove_load3", 100, true);
  ASSERT_TRUE(success_) << "remove_load3";

  // This case cannot be reverted.
  BackendTransaction("remove_one4", 0, false);
  ASSERT_TRUE(success_) << "remove_one4";
  BackendTransaction("remove_head4", 1, false);
  ASSERT_TRUE(success_) << "remove_head4";
}

#if BUILDFLAG(IS_WIN)
// http://crbug.com/396392
#define MAYBE_RecoverRemove DISABLED_RecoverRemove
#else
#define MAYBE_RecoverRemove RecoverRemove
#endif
TEST_F(DiskCacheBackendTest, MAYBE_RecoverRemove) {
  BackendRecoverRemove();
}

#if BUILDFLAG(IS_WIN)
// http://crbug.com/396392
#define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove
#else
#define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove
#endif
TEST_F(DiskCacheBackendTest, MAYBE_NewEvictionRecoverRemove) {
  SetNewEviction();
  BackendRecoverRemove();
}

void DiskCacheBackendTest::BackendRecoverWithEviction() {
  success_ = false;
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  DisableFirstCleanup();

  SetMask(0xf);
  SetMaxSize(0x1000);

  // We should not crash here.
  InitCache();
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
  BackendRecoverWithEviction();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
  SetNewEviction();
  BackendRecoverWithEviction();
}

// Tests that the |BackendImpl| fails to start with the wrong cache version.
TEST_F(DiskCacheTest, WrongVersion) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  net::TestCompletionCallback cb;

  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(cache_path_, nullptr, nullptr,
                                                net::DISK_CACHE, nullptr));
  cache->Init(cb.callback());
  ASSERT_THAT(cb.WaitForResult(), IsError(net::ERR_FAILED));
}

// Tests that the disk cache successfully joins the control group, dropping
// the existing cache in favour of a new empty cache.
// Disabled on Android since this test requires the cache creator to create
// blockfile caches.
#if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
  std::unique_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_path_);
  ASSERT_TRUE(cache.get());
  cache.reset();

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");
  TestBackendResultCompletionCallback cb;
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
      /*file_operations=*/nullptr, cache_path_, 0,
      disk_cache::ResetHandling::kResetOnError, /*net_log=*/nullptr,
      cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  EXPECT_EQ(0, rv.backend->GetEntryCount());
}
#endif

// Tests that the disk cache can restart in the control group preserving
// existing entries.
TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");

  std::unique_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_path_);
  ASSERT_TRUE(cache.get());

  net::TestCompletionCallback cb;

  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    cache = std::make_unique<disk_cache::BackendImpl>(
        cache_path_, nullptr, nullptr, net::DISK_CACHE, nullptr);
    cache->Init(cb.callback());
    ASSERT_THAT(cb.WaitForResult(), IsOk());
    EXPECT_EQ(1, cache->GetEntryCount());

    TestEntryResultCompletionCallback cb2;
    EntryResult result =
        cache->OpenEntry(kExistingEntryKey, net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();
  }
}

// Tests that the disk cache can leave the control group preserving existing
// entries.
TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
  {
    // Instantiate the SimpleCacheTrial, forcing this run into the
    // ExperimentControl group.
    base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                           "ExperimentControl");
    std::unique_ptr<disk_cache::BackendImpl> cache =
        CreateExistingEntryCache(cache_path_);
    ASSERT_TRUE(cache.get());
  }

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentNo group.
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
  net::TestCompletionCallback cb;

  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    std::unique_ptr<disk_cache::BackendImpl> cache(
        std::make_unique<disk_cache::BackendImpl>(cache_path_, nullptr, nullptr,
                                                  net::DISK_CACHE, nullptr));
    cache->Init(cb.callback());
    ASSERT_THAT(cb.WaitForResult(), IsOk());
    EXPECT_EQ(1, cache->GetEntryCount());

    TestEntryResultCompletionCallback cb2;
    EntryResult result =
        cache->OpenEntry(kExistingEntryKey, net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();
  }
}
// Tests that the cache is properly restarted on recovery error.
// Disabled on android since this test requires cache creator to create
// blockfile caches.
#if !BUILDFLAG(IS_ANDROID)
TEST_F(DiskCacheBackendTest, DeleteOld) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  SetNewEviction();

  TestBackendResultCompletionCallback cb;
  {
    base::ScopedDisallowBlocking disallow_blocking;
    base::FilePath path(cache_path_);
    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE,
        /*file_operations=*/nullptr, path, 0,
        disk_cache::ResetHandling::kResetOnError, /*net_log=*/nullptr,
        cb.callback());
    path.clear();  // Make sure path was captured by the previous call.
    rv = cb.GetResult(std::move(rv));
    ASSERT_THAT(rv.net_error, IsOk());
  }
  EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, /*max_size = */ 0,
                                  mask_));
}
#endif
// We want to be able to deal with messed up entries on disk.
void DiskCacheBackendTest::BackendInvalidEntry2() {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(OpenEntry("the first key", &entry1), IsOk());
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  entry1->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidEntry2) {
  BackendInvalidEntry2();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
  SetNewEviction();
  BackendInvalidEntry2();
}
// Tests that we don't crash or hang when enumerating this cache.
void DiskCacheBackendTest::BackendInvalidEntry3() {
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry* entry;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  while (iter->OpenNextEntry(&entry) == net::OK) {
    entry->Close();
  }
}

TEST_F(DiskCacheBackendTest, InvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  BackendInvalidEntry3();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  BackendInvalidEntry3();
  DisableIntegrityCheck();
}
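// A note on the SetMask(0x1) / SetMaxSize(0x3000) pattern used by the
// corruption tests below: a one-bit mask gives the index only two buckets, so
// almost every key ends up on a hash-collision chain, and the tiny max size
// lets TrimForTest() trigger eviction on demand. That combination is what lets
// the checked-in "dirty_entry*" caches exercise collision-list handling
// deterministically. (Summary inferred from the tests' own comments, not a
// statement about production tuning.)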
// Test that we handle a dirty entry on the LRU list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry4) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  TrimForTest(false);
}

// Test that we handle a dirty entry on the deleted list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry5) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  TrimDeletedListForTest(false);
}
TEST_F(DiskCacheBackendTest, InvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("dirty_entry5"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a dirty entry (but marked as clean) at the end, pointing to a
  // deleted entry through the hash collision list. We should not re-insert the
  // deleted entry into the index table.
  TrimForTest(false);
  // The cache should be clean (as detected by CheckCacheIntegrity).
}
// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry1) {
  ASSERT_TRUE(CopyTestCache("list_loop2"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // The second entry points at itself, and the first entry is not accessible
  // through the index, but it is at the head of the LRU.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("The first key", &entry), IsOk());
  entry->Close();

  TrimForTest(false);
  TrimForTest(false);
  ASSERT_THAT(OpenEntry("The first key", &entry), IsOk());
  entry->Close();
  EXPECT_EQ(1, cache_->GetEntryCount());
}

// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry2) {
  ASSERT_TRUE(CopyTestCache("list_loop3"));
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a wide loop of 5 entries.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
}
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("bad_rankings3"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();

  // The second entry is dirty, but removing it should not corrupt the list.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
  ASSERT_THAT(OpenEntry("the first key", &entry), IsOk());

  // This should not delete the cache.
  entry->Doom();
  FlushQueueForTest();
  entry->Close();

  ASSERT_THAT(OpenEntry("some other key", &entry), IsOk());
  entry->Close();
}
// Tests handling of corrupt entries by keeping the rankings node around, with
// a fatal failure.
void DiskCacheBackendTest::BackendInvalidEntry7() {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  entry_impl->rankings()->Data()->next = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should delete the cache. The list still has a corrupt node.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, InvalidEntry7) {
  BackendInvalidEntry7();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
  SetNewEviction();
  BackendInvalidEntry7();
}
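// The xx7 tests above and the xx8 pair below differ only in which rankings
// field gets zeroed, and the assertions show the two failure classes: clearing
// |next| breaks the LRU linkage itself, so once the iterator trips over it the
// whole cache is dropped (entry count goes to 0 -- a fatal failure), while
// clearing |contents| only orphans that node, so just the bad entry is
// discarded and the rest of the cache survives (a non-fatal failure).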
// Tests handling of corrupt entries by keeping the rankings node around, with
// a non-fatal failure.
void DiskCacheBackendTest::BackendInvalidEntry8() {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  entry_impl->rankings()->Data()->contents = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should not delete the cache.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
  entry->Close();
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, InvalidEntry8) {
  BackendInvalidEntry8();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
  SetNewEviction();
  BackendInvalidEntry8();
}
// Tests handling of corrupt entries detected by enumerations. Note that these
// tests (xx9 to xx11) are basically just going through slightly different
// codepaths so they are tightly coupled with the code, but that is better than
// not testing error handling code.
void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  if (eviction) {
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    std::unique_ptr<TestIterator> iter = CreateIterator();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));

    // Now a full iteration will work, and return one entry.
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));

    // This should detect what's left of the bad entry.
    EXPECT_NE(net::OK, OpenEntry(second, &entry));
    EXPECT_EQ(2, cache_->GetEntryCount());
  }
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidEntry9) {
  BackendInvalidEntry9(false);
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(false);
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
  BackendInvalidEntry9(true);
}

TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(true);
}
// Tests handling of corrupt entries detected by enumerations.
void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry(first, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 0, 200, nullptr, 0, false));
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // We have:
  // List 0: third -> second (bad).
  // List 1: first.

  if (eviction) {
    // Detection order: second -> first -> third.
    TrimForTest(false);
    EXPECT_EQ(3, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second -> first.
    // We should detect the problem through the list, but we should not delete
    // the entry.
    std::unique_ptr<TestIterator> iter = CreateIterator();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    EXPECT_EQ(first, entry->GetKey());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  }
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidEntry10) {
  BackendInvalidEntry10(false);
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
  BackendInvalidEntry10(true);
}
// Tests handling of corrupt entries detected by enumerations.
void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry(first, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 0, 200, nullptr, 0, false));
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(OpenEntry(second, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 0, 200, nullptr, 0, false));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // We have:
  // List 0: third.
  // List 1: second (bad) -> first.

  if (eviction) {
    // Detection order: third -> first -> second.
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second.
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    std::unique_ptr<TestIterator> iter = CreateIterator();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));

    // Now a full iteration will work, and return two entries.
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
    entry->Close();
    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  }
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidEntry11) {
  BackendInvalidEntry11(false);
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
  BackendInvalidEntry11(true);
}
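// The "List 0" / "List 1" diagrams in the xx10 and xx11 tests refer to the
// multiple ranking lists kept by the new eviction scheme: entries start on
// list 0 and move to a higher list once they are reused (here, re-opened and
// written to), which is why the re-touched entries sit on a different list
// than the freshly created ones. The asserted detection orders follow from
// walking those lists during trims and enumerations. (Summary inferred from
// the tests themselves.)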
// Tests handling of corrupt entries in the middle of a long eviction run.
void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(first, &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry(second, &entry), IsOk());

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_THAT(CreateEntry("third", &entry), IsOk());
  entry->Close();
  ASSERT_THAT(CreateEntry("fourth", &entry), IsOk());

  TrimForTest(true);
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry->Close();
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
  BackendTrimInvalidEntry12();
}

TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
  SetNewEviction();
  BackendTrimInvalidEntry12();
}
// We want to be able to deal with messed up entries on disk.
void DiskCacheBackendTest::BackendInvalidRankings2() {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_THAT(OpenEntry("some other key", &entry2), IsOk());
  entry2->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidRankings2) {
  BackendInvalidRankings2();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
  SetNewEviction();
  BackendInvalidRankings2();
}
// If the LRU is corrupt, we delete the cache.
void DiskCacheBackendTest::BackendInvalidRankings() {
  disk_cache::Entry* entry;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
  entry->Close();
  EXPECT_EQ(2, cache_->GetEntryCount());

  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
  FlushQueueForTest();  // Allow the restart to finish.
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}
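// In the *Failure variants above (and in the Disable* tests below),
// SetTestMode() makes the backend report a failure when it tries to
// reinitialize itself after detecting corruption. The "success" paths restart
// with an empty cache; the "failure" paths leave the backend disabled, which
// is the state BackendDisabledAPI() pokes at further down.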
// If the LRU is corrupt and we have open entries, we disable the cache.
void DiskCacheBackendTest::BackendDisable() {
  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());

  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
  EXPECT_EQ(0, cache_->GetEntryCount());
  EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));

  entry1->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, DisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}
// This is another type of corruption on the LRU; disable the cache.
void DiskCacheBackendTest::BackendDisable2() {
  EXPECT_EQ(8, cache_->GetEntryCount());

  disk_cache::Entry* entry;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(nullptr != entry);
    entry->Close();
    count++;
    ASSERT_LT(count, 9);
  }

  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DisableSuccess2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable2();
}

TEST_F(DiskCacheBackendTest, DisableFailure2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable2();
}
// If the index size changes when we disable the cache, we should not crash.
void DiskCacheBackendTest::BackendDisable3() {
  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_EQ(2, cache_->GetEntryCount());
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());
  entry1->Close();

  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
  FlushQueueForTest();

  ASSERT_THAT(CreateEntry("Something new", &entry2), IsOk());
  entry2->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDisable3();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDisable3();
}
// If we disable the cache, already open entries should work as far as
// possible.
void DiskCacheBackendTest::BackendDisable4() {
  disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());

  char key2[2000];
  char key3[20000];
  CacheTestFillBuffer(key2, sizeof(key2), true);
  CacheTestFillBuffer(key3, sizeof(key3), true);
  key2[sizeof(key2) - 1] = '\0';
  key3[sizeof(key3) - 1] = '\0';
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  ASSERT_THAT(CreateEntry(key3, &entry3), IsOk());

  const int kBufSize = 20000;
  scoped_refptr<net::IOBuffer> buf =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  memset(buf->data(), 0, kBufSize);
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));

  // This line should disable the cache but not delete it.
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry4));
  EXPECT_EQ(0, cache_->GetEntryCount());

  EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));

  EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));

  EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));

  std::string key = entry2->GetKey();
  EXPECT_EQ(sizeof(key2) - 1, key.size());
  key = entry3->GetKey();
  EXPECT_EQ(sizeof(key3) - 1, key.size());

  entry1->Close();
  entry2->Close();
  entry3->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable4();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable4();
}
// Tests the exposed API with a disabled cache.
void DiskCacheBackendTest::BackendDisabledAPI() {
  cache_impl_->SetUnitTestMode();  // Simulate failure restarting the cache.

  disk_cache::Entry *entry1, *entry2;
  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_EQ(2, cache_->GetEntryCount());
  ASSERT_THAT(iter->OpenNextEntry(&entry1), IsOk());
  entry1->Close();
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
  FlushQueueForTest();
  // The cache should be disabled.

  EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
  EXPECT_EQ(0, cache_->GetEntryCount());
  EXPECT_NE(net::OK, OpenEntry("First", &entry2));
  EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
  EXPECT_NE(net::OK, DoomEntry("First"));
  EXPECT_NE(net::OK, DoomAllEntries());
  EXPECT_NE(net::OK, DoomEntriesBetween(Time(), Time::Now()));
  EXPECT_NE(net::OK, DoomEntriesSince(Time()));
  iter = CreateIterator();
  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));

  base::StringPairs stats;
  cache_->GetStats(&stats);
  EXPECT_TRUE(stats.empty());
  OnExternalCacheHit("First");
}

TEST_F(DiskCacheBackendTest, DisabledAPI) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  InitCache();
  BackendDisabledAPI();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisabledAPI) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisabledAPI();
}
// Test that some kind of eviction happens.
void DiskCacheBackendTest::BackendEviction() {
  const int kMaxSize = 200 * 1024;
  const int kMaxEntryCount = 20;
  const int kWriteSize = kMaxSize / kMaxEntryCount;

  const int kWriteEntryCount = kMaxEntryCount * 2;
  static_assert(kWriteEntryCount * kWriteSize > kMaxSize,
                "must write more than MaxSize");

  SetMaxSize(kMaxSize);
  InitSparseCache(nullptr, nullptr);

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);

  std::string key_prefix("prefix");
  for (int i = 0; i < kWriteEntryCount; ++i) {
    AddDelay();
    disk_cache::Entry* entry = nullptr;
    ASSERT_THAT(CreateEntry(key_prefix + base::NumberToString(i), &entry),
                IsOk());
    disk_cache::ScopedEntryPtr entry_closer(entry);
    EXPECT_EQ(kWriteSize,
              WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  }

  int size = CalculateSizeOfAllEntries();
  EXPECT_GT(kMaxSize, size);
}

TEST_F(DiskCacheBackendTest, BackendEviction) {
  BackendEviction();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyBackendEviction) {
  SetMemoryOnlyMode();
  BackendEviction();
}

// TODO(morlovich): Enable BackendEviction test for simple cache after
// performance problems are addressed. See crbug.com/588184 for more
// information.
// This overly specific looking test is a regression test aimed at
// crbug.com/589186.
TEST_F(DiskCacheBackendTest, MemoryOnlyUseAfterFree) {
  SetMemoryOnlyMode();

  const int kMaxSize = 200 * 1024;
  const int kMaxEntryCount = 20;
  const int kWriteSize = kMaxSize / kMaxEntryCount;

  SetMaxSize(kMaxSize);
  InitCache();

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);

  // Create an entry to be our sparse entry that gets written later.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("first parent", &entry), IsOk());
  disk_cache::ScopedEntryPtr first_parent(entry);

  // Create a ton of entries, and keep them open, to put the cache well above
  // its eviction threshold.
  const int kTooManyEntriesCount = kMaxEntryCount * 2;
  std::list<disk_cache::ScopedEntryPtr> open_entries;
  std::string key_prefix("prefix");
  for (int i = 0; i < kTooManyEntriesCount; ++i) {
    ASSERT_THAT(CreateEntry(key_prefix + base::NumberToString(i), &entry),
                IsOk());
    // Not checking the result because it will start to fail once the max size
    // is reached.
    WriteData(entry, 1, 0, buffer.get(), kWriteSize, false);
    open_entries.push_back(disk_cache::ScopedEntryPtr(entry));
  }

  // Writing this sparse data should not crash. Ignoring the result because
  // we're only concerned with not crashing in this particular test.
  first_parent->WriteSparseData(32768, buffer.get(), 1024,
                                net::CompletionOnceCallback());
}
TEST_F(DiskCacheBackendTest, MemoryCapsWritesToMaxSize) {
  // Verify that the memory backend won't grow beyond its max size if lots of
  // open entries (each smaller than the max entry size) are trying to write
  // beyond the max size.
  SetMemoryOnlyMode();

  const int kMaxSize = 100 * 1024;       // 100KB cache.
  const int kNumEntries = 20;            // 20 entries to write.
  const int kWriteSize = kMaxSize / 10;  // Each entry writes 1/10th the max.

  SetMaxSize(kMaxSize);
  InitCache();

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);

  // Create an entry to be the final entry that gets written later.
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("final", &entry), IsOk());
  disk_cache::ScopedEntryPtr final_entry(entry);

  // Create a ton of entries, write to the cache, and keep the entries open.
  // They should start failing writes once the cache fills.
  std::list<disk_cache::ScopedEntryPtr> open_entries;
  std::string key_prefix("prefix");
  for (int i = 0; i < kNumEntries; ++i) {
    ASSERT_THAT(CreateEntry(key_prefix + base::NumberToString(i), &entry),
                IsOk());
    WriteData(entry, 1, 0, buffer.get(), kWriteSize, false);
    open_entries.push_back(disk_cache::ScopedEntryPtr(entry));
  }
  EXPECT_GE(kMaxSize, CalculateSizeOfAllEntries());

  // Any more writing at this point should cause an error.
  EXPECT_THAT(
      WriteData(final_entry.get(), 1, 0, buffer.get(), kWriteSize, false),
      IsError(net::ERR_INSUFFICIENT_RESOURCES));
}
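// ERR_INSUFFICIENT_RESOURCES is the memory backend's backpressure signal: the
// write is refused outright rather than evicting entries that are still held
// open. A caller seeing it can simply treat the payload as uncacheable, e.g.
// (a hypothetical caller-side sketch, not code from this test fixture):
//
//   int rv = entry->WriteData(1, 0, buf.get(), len, std::move(callback),
//                             /*truncate=*/false);
//   if (rv == net::ERR_INSUFFICIENT_RESOURCES)
//     entry->Doom();  // Give up on caching this response.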
TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
  MessageLoopHelper helper;

  ASSERT_TRUE(CleanupCacheDir());
  // Want to use our thread since we call SyncInit ourselves.
  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(
          cache_path_, nullptr, base::ThreadTaskRunnerHandle::Get(),
          net::DISK_CACHE, nullptr));
  ASSERT_TRUE(nullptr != cache.get());
  cache->SetUnitTestMode();
  ASSERT_THAT(cache->SyncInit(), IsOk());

  // Wait for a callback that never comes... about 2 secs :). The message loop
  // has to run to allow invocation of the usage timer.
  helper.WaitUntilCacheIoFinished(1);
}

TEST_F(DiskCacheBackendTest, TimerNotCreated) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));

  // Want to use our thread since we call SyncInit ourselves.
  std::unique_ptr<disk_cache::BackendImpl> cache(
      std::make_unique<disk_cache::BackendImpl>(
          cache_path_, nullptr, base::ThreadTaskRunnerHandle::Get(),
          net::DISK_CACHE, nullptr));
  ASSERT_TRUE(nullptr != cache.get());
  cache->SetUnitTestMode();
  ASSERT_NE(net::OK, cache->SyncInit());

  ASSERT_TRUE(nullptr == cache->GetTimerForTest());

  DisableIntegrityCheck();
}
TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
  InitCache();
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  entry->Close();
  FlushQueueForTest();

  disk_cache::StatsItems stats;
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  disk_cache::StatsItems::value_type hits("Create hit", "0x1");
  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));

  cache_.reset();

  // Now open the cache and verify that the stats are still there.
  DisableFirstCleanup();
  InitCache();
  EXPECT_EQ(1, cache_->GetEntryCount());

  stats.clear();
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());
  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
}
void DiskCacheBackendTest::BackendDoomAll() {
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry("first", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("second", &entry2), IsOk());
  entry1->Close();
  entry2->Close();

  ASSERT_THAT(CreateEntry("third", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("fourth", &entry2), IsOk());

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_THAT(DoomAllEntries(), IsOk());
  ASSERT_EQ(0, cache_->GetEntryCount());

  // We should stop posting tasks at some point (if we post any).
  base::RunLoop().RunUntilIdle();

  disk_cache::Entry *entry3, *entry4;
  EXPECT_NE(net::OK, OpenEntry("third", &entry3));
  ASSERT_THAT(CreateEntry("third", &entry3), IsOk());
  ASSERT_THAT(CreateEntry("fourth", &entry4), IsOk());

  EXPECT_THAT(DoomAllEntries(), IsOk());
  ASSERT_EQ(0, cache_->GetEntryCount());

  entry1->Close();
  entry2->Close();
  entry3->Doom();  // The entry should be already doomed, but this must work.
  entry3->Close();
  entry4->Close();

  // Now try with all references released.
  ASSERT_THAT(CreateEntry("third", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("fourth", &entry2), IsOk());
  entry1->Close();
  entry2->Close();

  ASSERT_EQ(2, cache_->GetEntryCount());
  EXPECT_THAT(DoomAllEntries(), IsOk());
  ASSERT_EQ(0, cache_->GetEntryCount());

  EXPECT_THAT(DoomAllEntries(), IsOk());
}

TEST_F(DiskCacheBackendTest, DoomAll) {
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
  SetNewEviction();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
  SetMemoryOnlyMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
  SetCacheType(net::SHADER_CACHE);
  BackendDoomAll();
}
// If the index size changes when we doom the cache, we should not crash.
void DiskCacheBackendTest::BackendDoomAll2() {
  EXPECT_EQ(2, cache_->GetEntryCount());
  EXPECT_THAT(DoomAllEntries(), IsOk());

  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("Something new", &entry), IsOk());
  entry->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDoomAll2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDoomAll2();
}
// We should be able to create the same entry on multiple simultaneous
// instances of the cache.
TEST_F(DiskCacheTest, MultipleInstances) {
  base::ScopedTempDir store1, store2;
  ASSERT_TRUE(store1.CreateUniqueTempDir());
  ASSERT_TRUE(store2.CreateUniqueTempDir());

  TestBackendResultCompletionCallback cb;

  const int kNumberOfCaches = 2;
  std::unique_ptr<disk_cache::Backend> caches[kNumberOfCaches];

  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, /*file_operations=*/nullptr,
      store1.GetPath(), 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  caches[0] = std::move(rv.backend);

  rv = disk_cache::CreateCacheBackend(
      net::GENERATED_BYTE_CODE_CACHE, net::CACHE_BACKEND_DEFAULT,
      /*file_operations=*/nullptr, store2.GetPath(), 0,
      disk_cache::ResetHandling::kNeverReset, /*net_log=*/nullptr,
      cb.callback());
  rv = cb.GetResult(std::move(rv));
  ASSERT_THAT(rv.net_error, IsOk());
  caches[1] = std::move(rv.backend);

  ASSERT_TRUE(caches[0].get() != nullptr && caches[1].get() != nullptr);

  std::string key("the first key");
  for (auto& cache : caches) {
    TestEntryResultCompletionCallback cb2;
    EntryResult result = cache->CreateEntry(key, net::HIGHEST, cb2.callback());
    result = cb2.GetResult(std::move(result));
    ASSERT_THAT(result.net_error(), IsOk());
    result.ReleaseEntry()->Close();
  }
}
// Test the six regions of the curve that determines the max cache size.
TEST_F(DiskCacheTest, AutomaticMaxSize) {
  using disk_cache::kDefaultCacheSize;
  int64_t large_size = kDefaultCacheSize;

  // Region 1: expected = available * 0.8
  EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
            disk_cache::PreferredCacheSize(large_size - 1));
  EXPECT_EQ(kDefaultCacheSize * 8 / 10,
            disk_cache::PreferredCacheSize(large_size));
  EXPECT_EQ(kDefaultCacheSize - 1,
            disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));

  // Region 2: expected = default_size
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10 / 8));
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10 - 1));

  // Region 3: expected = available * 0.1
  EXPECT_EQ(kDefaultCacheSize, disk_cache::PreferredCacheSize(large_size * 10));
  EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
            disk_cache::PreferredCacheSize(large_size * 25 - 1));

  // Region 4: expected = default_size * 2.5
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 25));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 100 - 1));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 100));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 250 - 1));

  // Region 5: expected = available * 0.1
  int64_t largest_size = kDefaultCacheSize * 4;
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 250));
  EXPECT_EQ(largest_size - 1,
            disk_cache::PreferredCacheSize(largest_size * 100 - 1));

  // Region 6: expected = largest possible size
  EXPECT_EQ(largest_size, disk_cache::PreferredCacheSize(largest_size * 100));
  EXPECT_EQ(largest_size, disk_cache::PreferredCacheSize(largest_size * 10000));
}
// Tests that we can "migrate" a running instance from one experiment group to
// another.
TEST_F(DiskCacheBackendTest, Histograms) {
  InitCache();
  disk_cache::BackendImpl* backend_ = cache_impl_;  // Needed by the macro.

  for (int i = 1; i < 3; i++) {
    CACHE_UMA(HOURS, "FillupTime", i, 28);
  }
}
// Make sure that we keep the total memory used by the internal buffers under
// control.
TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, true);

  for (int i = 0; i < 10; i++) {
    SCOPED_TRACE(i);
    // Allocate 2MB for this entry.
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize,
              WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
    EXPECT_EQ(kSize,
              WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));

    // Delete one of the buffers and truncate the other.
    EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
    EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));

    // Delete the second buffer, writing 10 bytes to disk.
    entry->Close();
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  }

  entry->Close();
  EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
}
// This test assumes at least 150MB of system memory.
TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
  InitCache();

  const int kOneMB = 1024 * 1024;
  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());

  cache_impl_->BufferDeleted(kOneMB);
  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());

  // Check the upper limit.
  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));

  for (int i = 0; i < 30; i++)
    cache_impl_->IsAllocAllowed(0, kOneMB);  // Ignore the result.

  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
}
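// Taken together, the assertions above pin down the buffer accounting
// contract this test relies on: IsAllocAllowed() both answers and reserves
// (each successful call grows GetTotalBuffersSize()), BufferDeleted() returns
// quota, and somewhere around 30 x 1MB of outstanding buffers the backend
// starts refusing, whether asked for one 30MB block or 1MB at a time. The
// exact limit is an implementation detail; the test only probes its
// neighborhood.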
// Tests that sharing of external files works and we are able to delete the
// files when we need to.
TEST_F(DiskCacheBackendTest, FileSharing) {
  InitCache();

  disk_cache::Addr address(0x80000001);
  ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
  base::FilePath name = cache_impl_->GetFileName(address);

  {
    auto file = base::MakeRefCounted<disk_cache::File>(false);
    file->Init(name);

#if BUILDFLAG(IS_WIN)
    DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
    DWORD access = GENERIC_READ | GENERIC_WRITE;
    base::win::ScopedHandle file2(CreateFile(name.value().c_str(), access,
                                             sharing, nullptr, OPEN_EXISTING, 0,
                                             nullptr));
    EXPECT_FALSE(file2.IsValid());

    sharing |= FILE_SHARE_DELETE;
    file2.Set(CreateFile(name.value().c_str(), access, sharing, nullptr,
                         OPEN_EXISTING, 0, nullptr));
    EXPECT_TRUE(file2.IsValid());
#endif

    EXPECT_TRUE(base::DeleteFile(name));

    // We should be able to use the file.
    const int kSize = 200;
    char buffer1[kSize];
    char buffer2[kSize];
    memset(buffer1, 't', kSize);
    memset(buffer2, 0, kSize);
    EXPECT_TRUE(file->Write(buffer1, kSize, 0));
    EXPECT_TRUE(file->Read(buffer2, kSize, 0));
    EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
  }

  base::File file(name, base::File::FLAG_OPEN | base::File::FLAG_READ);
  EXPECT_FALSE(file.IsValid());
  EXPECT_EQ(file.error_details(), base::File::FILE_ERROR_NOT_FOUND);
}
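// What the test above demonstrates: CreateExternalFile() hands out an Addr for
// data stored outside the block files, and the backend evidently holds that
// file open in a way that requires delete sharing from other opens (hence the
// first Windows CreateFile failing until FILE_SHARE_DELETE is added). On all
// platforms the name can be deleted while the disk_cache::File handle stays
// usable, and the final base::File open proves the name really is gone once
// the handle is dropped.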
TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
  InitCache();

  disk_cache::Entry* entry;
  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    entry->Close();
  }

  // Ping the oldest entry.
  OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_THAT(OpenEntry("key0", &entry), IsOk());
  entry->Close();
}

TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();

  disk_cache::Entry* entry;
  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    entry->Close();
  }

  // Ping the oldest entry.
  OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_THAT(OpenEntry("key0", &entry), IsOk());
  entry->Close();
}
TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
  // Use net::APP_CACHE to make size estimations deterministic via
  // non-optimistic writes.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingCreate(false);
}

TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingDoom) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingDoom();
}

TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingFileIO(false);
}
TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
  SetSimpleCacheMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
  SetSimpleCacheMode();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, SimpleCacheLoad) {
  SetMaxSize(0x100000);
  SetSimpleCacheMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
  SetSimpleCacheMode();
  BackendDoomRecent();
}

// crbug.com/330926, crbug.com/370677
TEST_F(DiskCacheBackendTest, DISABLED_SimpleDoomBetween) {
  SetSimpleCacheMode();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
  SetSimpleCacheMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendDoomAll();
}
TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = nullptr;

  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);
  entry->Close();
  entry = nullptr;

  // To make sure the file creation completed we need to call open again so
  // that we block until it actually created the files.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);
  entry->Close();
  entry = nullptr;

  // Delete one of the files in the entry.
  base::FilePath to_delete_file = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  EXPECT_TRUE(base::PathExists(to_delete_file));
  EXPECT_TRUE(base::DeleteFile(to_delete_file));

  // Failing to open the entry should delete the rest of these files.
  ASSERT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));

  // Confirm the rest of the files are gone.
  for (int i = 1; i < disk_cache::kSimpleEntryNormalFileCount; ++i) {
    base::FilePath should_be_gone_file(cache_path_.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
    EXPECT_FALSE(base::PathExists(should_be_gone_file));
  }
}
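// The simple cache stores each entry as a small set of plain files, addressed
// by GetFilenameFromKeyAndFileIndex(key, index) for index 0 through
// kSimpleEntryNormalFileCount - 1; that is why deleting a single file makes
// the open fail and takes the sibling files down with it. (Described only as
// far as the tests here exercise it; the on-disk naming scheme itself lives
// in simple_util.)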
TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = nullptr;

  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  disk_cache::Entry* null = nullptr;
  ASSERT_NE(null, entry);
  entry->Close();
  entry = nullptr;

  // To make sure the file creation completed we need to call open again so
  // that we block until it actually created the files.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ASSERT_NE(null, entry);
  entry->Close();
  entry = nullptr;

  // The entry is being closed on the Simple Cache worker pool.
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Write an invalid header for stream 0 and stream 1.
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));

  disk_cache::SimpleFileHeader header;
  header.initial_magic_number = UINT64_C(0xbadf00d);
  EXPECT_EQ(static_cast<int>(sizeof(header)),
            base::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
                            sizeof(header)));
  ASSERT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));
}
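// The open above fails because every simple-cache entry file begins with a
// SimpleFileHeader whose initial_magic_number must match the expected
// constant; 0xbadf00d is simply a value guaranteed not to, so the backend
// treats the file as corrupt and reports ERR_FAILED.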
// Tests that the Simple Cache Backend fails to initialize with non-matching
// file structure on disk.
TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
  // Create a cache structure with the |BackendImpl|.
  InitCache();
  disk_cache::Entry* entry;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
  entry->Close();
  cache_.reset();

  // Check that the |SimpleBackendImpl| does not favor this structure.
  auto simple_cache = std::make_unique<disk_cache::SimpleBackendImpl>(
      /*file_operations_factory=*/nullptr, cache_path_, nullptr, nullptr, 0,
      net::DISK_CACHE, nullptr);
  net::TestCompletionCallback cb;
  simple_cache->Init(cb.callback());
  EXPECT_NE(net::OK, cb.WaitForResult());
  simple_cache.reset();
  DisableIntegrityCheck();
}
// Tests that the |BackendImpl| refuses to initialize on top of the files
// generated by the Simple Cache Backend.
TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
  // Create a cache structure with the |SimpleBackendImpl|.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
  entry->Close();
  cache_.reset();

  // Check that the |BackendImpl| does not favor this structure.
  auto cache = std::make_unique<disk_cache::BackendImpl>(
      cache_path_, nullptr, nullptr, net::DISK_CACHE, nullptr);
  cache->SetUnitTestMode();
  net::TestCompletionCallback cb;
  cache->Init(cb.callback());
  EXPECT_NE(net::OK, cb.WaitForResult());
  cache.reset();
  DisableIntegrityCheck();
}
TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
  SetSimpleCacheMode();
  BackendFixEnumerators();
}

// Tests basic functionality of the SimpleBackend implementation of the
// enumeration API.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Check that enumeration returns all entries.
  std::set<std::string> keys_to_match(key_pool);
  std::unique_ptr<TestIterator> iter = CreateIterator();
  size_t count = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();
  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());

  // Check that opening entries does not affect enumeration.
  keys_to_match = key_pool;
  iter = CreateIterator();
  count = 0;
  disk_cache::Entry* entry_opened_before;
  ASSERT_THAT(OpenEntry(*(key_pool.begin()), &entry_opened_before), IsOk());
  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size() / 2, iter.get(),
                                    &keys_to_match, &count));

  disk_cache::Entry* entry_opened_middle;
  ASSERT_EQ(net::OK, OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();
  entry_opened_before->Close();
  entry_opened_middle->Close();

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}
  3497. // Tests that the enumerations are not affected by dooming an entry in the
  3498. // middle.
  3499. TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
  3500. SetSimpleCacheMode();
  3501. InitCache();
  3502. std::set<std::string> key_pool;
  3503. ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
  3504. // Check that enumeration returns all entries but the doomed one.
  3505. std::set<std::string> keys_to_match(key_pool);
  3506. std::unique_ptr<TestIterator> iter = CreateIterator();
  3507. size_t count = 0;
  3508. ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size() / 2, iter.get(),
  3509. &keys_to_match, &count));
  3510. std::string key_to_delete = *(keys_to_match.begin());
  3511. DoomEntry(key_to_delete);
  3512. keys_to_match.erase(key_to_delete);
  3513. key_pool.erase(key_to_delete);
  3514. ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  3515. iter.reset();
  3516. EXPECT_EQ(key_pool.size(), count);
  3517. EXPECT_TRUE(keys_to_match.empty());
  3518. }
  3519. // Tests that enumerations are not affected by corrupt files.
  3520. TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
  3521. SetSimpleCacheMode();
  3522. InitCache();
  3523. // Create a corrupt entry.
  3524. const std::string key = "the key";
  3525. disk_cache::Entry* corrupted_entry;
  3526. ASSERT_THAT(CreateEntry(key, &corrupted_entry), IsOk());
  3527. ASSERT_TRUE(corrupted_entry);
  3528. const int kSize = 50;
  3529. scoped_refptr<net::IOBuffer> buffer =
  3530. base::MakeRefCounted<net::IOBuffer>(kSize);
  3531. CacheTestFillBuffer(buffer->data(), kSize, false);
  3532. ASSERT_EQ(kSize,
  3533. WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
  3534. ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
  3535. corrupted_entry->Close();
  3536. // Let all I/O finish so it doesn't race with corrupting the file below.
  3537. RunUntilIdle();
  3538. std::set<std::string> key_pool;
  3539. ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
  3540. EXPECT_TRUE(
  3541. disk_cache::simple_util::CreateCorruptFileForTests(key, cache_path_));
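  // The corrupted entry was created successfully, so (as far as I can tell)
  // it is still counted by the index; hence the +1 over |key_pool| here.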
  EXPECT_EQ(key_pool.size() + 1, static_cast<size_t>(cache_->GetEntryCount()));

  // Check that enumeration returns all entries but the corrupt one.
  std::set<std::string> keys_to_match(key_pool);
  std::unique_ptr<TestIterator> iter = CreateIterator();
  size_t count = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  iter.reset();

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}

// Tests that enumerations don't leak memory when the backend is destructed
// mid-enumeration.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  std::unique_ptr<TestIterator> iter = CreateIterator();
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(iter->OpenNextEntry(&entry), IsOk());
  EXPECT_TRUE(entry);
  disk_cache::ScopedEntryPtr entry_closer(entry);

  cache_.reset();
  // This test passes if we don't leak memory.
}

// Verify that tasks run in priority order when the experiment is enabled.
// Test has races, disabling until fixed: https://crbug.com/853283
TEST_F(DiskCacheBackendTest, DISABLED_SimpleCachePrioritizedEntryOrder) {
  base::test::ScopedFeatureList scoped_feature_list;
  SetSimpleCacheMode();
  InitCache();

  // Set the SimpleCache's worker pool to a sequenced type for testing
  // priority order.
  disk_cache::SimpleBackendImpl* simple_cache =
      static_cast<disk_cache::SimpleBackendImpl*>(cache_.get());
  auto task_runner = base::ThreadPool::CreateSequencedTaskRunner(
      {base::TaskPriority::USER_VISIBLE, base::MayBlock()});
  simple_cache->SetTaskRunnerForTesting(task_runner);

  // Create three entries. Priority order is 3, 1, 2 because 3 has the highest
  // request priority and 1 is created before 2.
  disk_cache::Entry* entry1 = nullptr;
  disk_cache::Entry* entry2 = nullptr;
  disk_cache::Entry* entry3 = nullptr;
  ASSERT_THAT(CreateEntryWithPriority("first", net::LOWEST, &entry1), IsOk());
  ASSERT_THAT(CreateEntryWithPriority("second", net::LOWEST, &entry2), IsOk());
  ASSERT_THAT(CreateEntryWithPriority("third", net::HIGHEST, &entry3), IsOk());

  // Write some data to the entries.
  const int kSize = 10;
  scoped_refptr<net::IOBuffer> buf1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buf2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buf3 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf1->data(), kSize, false);
  CacheTestFillBuffer(buf2->data(), kSize, false);
  CacheTestFillBuffer(buf3->data(), kSize, false);

  // Write to stream 2 because it's the only stream that can't be read from
  // synchronously.
  EXPECT_EQ(kSize, WriteData(entry1, 2, 0, buf1.get(), kSize, true));
  EXPECT_EQ(kSize, WriteData(entry2, 2, 0, buf1.get(), kSize, true));
  EXPECT_EQ(kSize, WriteData(entry3, 2, 0, buf1.get(), kSize, true));

  // Wait until the task_runner's queue is empty (WriteData might have
  // optimistically returned synchronously but still had some tasks to run in
  // the worker pool).
  base::RunLoop run_loop;
  task_runner->PostTaskAndReply(FROM_HERE, base::DoNothing(),
                                run_loop.QuitClosure());
  run_loop.Run();

  std::vector<int> finished_read_order;
  auto finished_callback = [](std::vector<int>* finished_read_order,
                              int entry_number, base::OnceClosure quit_closure,
                              int rv) {
    finished_read_order->push_back(entry_number);
    if (quit_closure)
      std::move(quit_closure).Run();
  };

  scoped_refptr<net::IOBuffer> read_buf1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> read_buf2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> read_buf3 =
      base::MakeRefCounted<net::IOBuffer>(kSize);

  // Read from the entries in order 2, 3, 1. They should be reprioritized to
  // 3, 1, 2.
  base::RunLoop read_run_loop;

  entry2->ReadData(2, 0, read_buf2.get(), kSize,
                   base::BindOnce(finished_callback, &finished_read_order, 2,
                                  read_run_loop.QuitClosure()));
  entry3->ReadData(2, 0, read_buf3.get(), kSize,
                   base::BindOnce(finished_callback, &finished_read_order, 3,
                                  base::OnceClosure()));
  entry1->ReadData(2, 0, read_buf1.get(), kSize,
                   base::BindOnce(finished_callback, &finished_read_order, 1,
                                  base::OnceClosure()));
  EXPECT_EQ(0u, finished_read_order.size());

  read_run_loop.Run();
  EXPECT_EQ((std::vector<int>{3, 1, 2}), finished_read_order);
  entry1->Close();
  entry2->Close();
  entry3->Close();
}

// Tests that enumerations include entries with long keys.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationLongKeys) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
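  // Use a key longer than the initial chunk the backend reads from an entry
  // file (kInitialHeaderRead), so that recovering the key during enumeration
  // should require an additional read.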
  const size_t long_key_length =
      disk_cache::SimpleSynchronousEntry::kInitialHeaderRead + 10;
  std::string long_key(long_key_length, 'X');
  key_pool.insert(long_key);
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(long_key.c_str(), &entry), IsOk());
  entry->Close();

  std::unique_ptr<TestIterator> iter = CreateIterator();
  size_t count = 0;
  EXPECT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &key_pool, &count));
  EXPECT_TRUE(key_pool.empty());
}

// Tests that a SimpleCache doesn't crash when files are deleted very quickly
// after closing.
// NOTE: IF THIS TEST IS FLAKY THEN IT IS FAILING. See https://crbug.com/416940
TEST_F(DiskCacheBackendTest, SimpleCacheDeleteQuickly) {
  SetSimpleCacheMode();
  for (int i = 0; i < 100; ++i) {
    InitCache();
    cache_.reset();
    EXPECT_TRUE(CleanupCacheDir());
  }
}

TEST_F(DiskCacheBackendTest, SimpleCacheLateDoom) {
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry("first", &entry1), IsOk());
  ASSERT_THAT(CreateEntry("second", &entry2), IsOk());
  entry1->Close();

  // Ensure that the directory mtime is flushed to disk before serializing the
  // index.
  disk_cache::FlushCacheThreadForTesting();
#if BUILDFLAG(IS_POSIX)
  base::File cache_dir(cache_path_,
                       base::File::FLAG_OPEN | base::File::FLAG_READ);
  EXPECT_TRUE(cache_dir.Flush());
#endif  // BUILDFLAG(IS_POSIX)
  cache_.reset();
  disk_cache::FlushCacheThreadForTesting();

  // The index is now written. Dooming the last entry can't delete a file,
  // because that would advance the cache directory mtime and invalidate the
  // index.
  entry2->Doom();
  entry2->Close();

  DisableFirstCleanup();
  InitCache();
  EXPECT_EQ(disk_cache::SimpleIndex::INITIALIZE_METHOD_LOADED,
            simple_cache_impl_->index()->init_method());
}

TEST_F(DiskCacheBackendTest, SimpleCacheNegMaxSize) {
  SetMaxSize(-1);
  SetSimpleCacheMode();
  InitCache();
  // We don't know what it will pick, but it's limited to what
  // disk_cache::PreferredCacheSize would return, scaled by the size
  // experiment, which only goes up to 4x. It definitely should not be
  // MAX_UINT64.
  EXPECT_NE(simple_cache_impl_->index()->max_size(),
            std::numeric_limits<uint64_t>::max());
  int max_default_size =
      2 * disk_cache::PreferredCacheSize(std::numeric_limits<int32_t>::max());
  ASSERT_GE(max_default_size, 0);
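  // (The ASSERT above reads as a sanity check that the 2x computation did not
  // overflow int.)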
  EXPECT_LT(simple_cache_impl_->index()->max_size(),
            static_cast<unsigned>(max_default_size));

  uint64_t max_size_without_scaling = simple_cache_impl_->index()->max_size();

  // Scale to 200%. The size should be twice |max_size_without_scaling|, but
  // since that's also capped at 20% of available space, we only check that it
  // lands between |max_size_without_scaling| and
  // 2 * |max_size_without_scaling|.
  {
    base::test::ScopedFeatureList scoped_feature_list;
    std::map<std::string, std::string> field_trial_params;
    field_trial_params["percent_relative_size"] = "200";
    scoped_feature_list.InitAndEnableFeatureWithParameters(
        disk_cache::kChangeDiskCacheSizeExperiment, field_trial_params);

    InitCache();

    uint64_t max_size_scaled = simple_cache_impl_->index()->max_size();

    EXPECT_GE(max_size_scaled, max_size_without_scaling);
    EXPECT_LE(max_size_scaled, 2 * max_size_without_scaling);
  }
}

TEST_F(DiskCacheBackendTest, SimpleLastModified) {
  // Simple cache used to incorrectly set LastModified on entries based on
  // the timestamp of the cache directory, and not the entries' files
  // (https://crbug.com/714143). So this test arranges for a situation
  // where this would occur by doing:
  // 1) Write entry 1
  // 2) Delay
  // 3) Write entry 2. This sets the directory timestamp to be different from
  //    the timestamp of entry 1 (due to the delay).
  // It then checks whether entry 1 got the proper timestamp or not.
  SetSimpleCacheMode();
  InitCache();
  std::string key1 = GenerateKey(true);
  std::string key2 = GenerateKey(true);

  disk_cache::Entry* entry1;
  ASSERT_THAT(CreateEntry(key1, &entry1), IsOk());

  // Make the Create complete --- SimpleCache can handle it optimistically,
  // and if we let it go fully async then trying to flush the Close might just
  // flush the Create.
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  entry1->Close();

  // Make the ::Close actually complete, since it is asynchronous.
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  Time entry1_timestamp = Time::NowFromSystemTime();

  // Don't want AddDelay since it sleeps 1s(!) for SimpleCache, and we don't
  // care about reduced precision in the index here.
  while (base::Time::NowFromSystemTime() <=
         (entry1_timestamp + base::Milliseconds(10))) {
    base::PlatformThread::Sleep(base::Milliseconds(1));
  }

  disk_cache::Entry* entry2;
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
  entry2->Close();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  disk_cache::Entry* reopen_entry1;
  ASSERT_THAT(OpenEntry(key1, &reopen_entry1), IsOk());

  // This shouldn't pick up entry2's write time incorrectly.
  EXPECT_LE(reopen_entry1->GetLastModified(), entry1_timestamp);
  reopen_entry1->Close();
}

TEST_F(DiskCacheBackendTest, SimpleFdLimit) {
  base::HistogramTester histogram_tester;
  SetSimpleCacheMode();
  // Make things blocking so CreateEntry actually waits for the file to be
  // created.
  SetCacheType(net::APP_CACHE);
  InitCache();

  disk_cache::Entry* entries[kLargeNumEntries];
  std::string keys[kLargeNumEntries];
  for (int i = 0; i < kLargeNumEntries; ++i) {
    keys[i] = GenerateKey(true);
    ASSERT_THAT(CreateEntry(keys[i], &entries[i]), IsOk());
  }

  // Note the fixture sets the file limit to 64.
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_CLOSE_FILE,
                                     kLargeNumEntries - 64);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE, 0);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  const int kSize = 25000;
  scoped_refptr<net::IOBuffer> buf1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf1->data(), kSize, false);

  scoped_refptr<net::IOBuffer> buf2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf2->data(), kSize, false);

  // Doom an entry and create a new one with the same name, to test that both
  // re-open properly.
  EXPECT_EQ(net::OK, DoomEntry(keys[0]));
  disk_cache::Entry* alt_entry;
  ASSERT_THAT(CreateEntry(keys[0], &alt_entry), IsOk());

  // One more file closure here to account for |alt_entry|.
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_CLOSE_FILE,
                                     kLargeNumEntries - 64 + 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE, 0);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  // Do some writes in the [1...kLargeNumEntries) range, both to test bringing
  // those in and to kick out [0] and |alt_entry|. These have to go to
  // stream != 0 to actually need files.
  for (int i = 1; i < kLargeNumEntries; ++i) {
    EXPECT_EQ(kSize, WriteData(entries[i], 1, 0, buf1.get(), kSize, true));
    scoped_refptr<net::IOBuffer> read_buf =
        base::MakeRefCounted<net::IOBuffer>(kSize);
    ASSERT_EQ(kSize, ReadData(entries[i], 1, 0, read_buf.get(), kSize));
    EXPECT_EQ(0, memcmp(read_buf->data(), buf1->data(), kSize));
  }

  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_CLOSE_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE,
                                     kLargeNumEntries - 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  EXPECT_EQ(kSize, WriteData(entries[0], 1, 0, buf1.get(), kSize, true));
  EXPECT_EQ(kSize, WriteData(alt_entry, 1, 0, buf2.get(), kSize, true));

  scoped_refptr<net::IOBuffer> read_buf =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  ASSERT_EQ(kSize, ReadData(entries[0], 1, 0, read_buf.get(), kSize));
  EXPECT_EQ(0, memcmp(read_buf->data(), buf1->data(), kSize));

  scoped_refptr<net::IOBuffer> read_buf2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  ASSERT_EQ(kSize, ReadData(alt_entry, 1, 0, read_buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(read_buf2->data(), buf2->data(), kSize));

  // Two more things than last time --- entries[0] and |alt_entry|.
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_CLOSE_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1 + 2);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_REOPEN_FILE,
                                     kLargeNumEntries + 1);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);

  for (auto* entry : entries) {
    entry->Close();
    RunUntilIdle();
  }
  alt_entry->Close();
  RunUntilIdle();

  // Closes have to pull things in to write out the footer, but they also
  // free up FDs.
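  // Reading the expectations below: each of the (kLargeNumEntries - 64 + 1)
  // files previously closed by the limiter presumably gets reopened exactly
  // once to write its footer, catching the reopen total up to the close
  // total, while the close total stays put since closing entries keeps
  // freeing descriptors.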
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_CLOSE_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1 + 2);
  histogram_tester.ExpectBucketCount(
      "SimpleCache.FileDescriptorLimiterAction",
      disk_cache::FD_LIMIT_REOPEN_FILE,
      kLargeNumEntries - 64 + 1 + kLargeNumEntries - 1 + 2);
  histogram_tester.ExpectBucketCount("SimpleCache.FileDescriptorLimiterAction",
                                     disk_cache::FD_LIMIT_FAIL_REOPEN_FILE, 0);
}

TEST_F(DiskCacheBackendTest, SparseEvict) {
  const int kMaxSize = 512;
  SetMaxSize(kMaxSize);
  InitCache();

  scoped_refptr<net::IOBuffer> buffer = base::MakeRefCounted<net::IOBuffer>(64);
  CacheTestFillBuffer(buffer->data(), 64, false);

  disk_cache::Entry* entry0 = nullptr;
  ASSERT_THAT(CreateEntry("http://www.0.com/", &entry0), IsOk());

  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry("http://www.1.com/", &entry1), IsOk());

  disk_cache::Entry* entry2 = nullptr;
  // This strange-looking domain name affects cache trim order
  // due to hashing.
  ASSERT_THAT(CreateEntry("http://www.15360.com/", &entry2), IsOk());

  // Write sparse data to put us over the eviction threshold.
  ASSERT_EQ(64, WriteSparseData(entry0, 0, buffer.get(), 64));
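  // The next write lands at a far-away offset (~64 MiB), which should go into
  // a separate sparse child entry, giving the eviction pass more than one
  // item to walk.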
  ASSERT_EQ(1, WriteSparseData(entry0, 67108923, buffer.get(), 1));
  ASSERT_EQ(1, WriteSparseData(entry1, 53, buffer.get(), 1));
  ASSERT_EQ(1, WriteSparseData(entry2, 0, buffer.get(), 1));

  // Closing these in a special order should not lead to buggy reentrant
  // eviction.
  entry1->Close();
  entry2->Close();
  entry0->Close();
}

TEST_F(DiskCacheBackendTest, InMemorySparseDoom) {
  const int kMaxSize = 512;
  SetMaxSize(kMaxSize);
  SetMemoryOnlyMode();
  InitCache();

  scoped_refptr<net::IOBuffer> buffer = base::MakeRefCounted<net::IOBuffer>(64);
  CacheTestFillBuffer(buffer->data(), 64, false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("http://www.0.com/", &entry), IsOk());
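  // This sparse write is expected to fail, presumably due to the per-entry
  // size limit implied by the tiny kMaxSize; it should still leave behind the
  // sparse parent/child bookkeeping that the doom below must walk.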
  ASSERT_EQ(net::ERR_FAILED, WriteSparseData(entry, 4337, buffer.get(), 64));
  entry->Close();

  // Dooming all entries at this point should properly iterate over
  // the parent and its children.
  DoomAllEntries();
}

TEST_F(DiskCacheBackendTest, BlockFileMaxSizeLimit) {
  InitCache();

  int64_t size = std::numeric_limits<int32_t>::max();
  SetMaxSize(size, true /* should_succeed */);

  size += 1;
  SetMaxSize(size, false /* should_succeed */);
}

TEST_F(DiskCacheBackendTest, InMemoryMaxSizeLimit) {
  SetMemoryOnlyMode();
  InitCache();

  int64_t size = std::numeric_limits<int32_t>::max();
  SetMaxSize(size, true /* should_succeed */);

  size += 1;
  SetMaxSize(size, false /* should_succeed */);
}

TEST_F(DiskCacheBackendTest, SimpleMaxSizeLimit) {
  SetSimpleCacheMode();
  InitCache();

  int64_t size = std::numeric_limits<int32_t>::max();
  SetMaxSize(size, true /* should_succeed */);

  size += 1;
  SetMaxSize(size, true /* should_succeed */);
}

void DiskCacheBackendTest::BackendOpenOrCreateEntry() {
  // Avoid the weird kNoRandom flag on blockfile, since this needs to
  // test cleanup behavior actually used in production.
  if (memory_only_) {
    InitCache();
  } else {
    CleanupCacheDir();
    // Since we're not forcing a clean shutdown, the integrity check may fail.
    DisableIntegrityCheck();
    CreateBackend(disk_cache::kNone);
  }

  // Test that a new key is created.
  disk_cache::EntryResult es1 = OpenOrCreateEntry("first");
  ASSERT_THAT(es1.net_error(), IsOk());
  ASSERT_FALSE(es1.opened());
  disk_cache::Entry* e1 = es1.ReleaseEntry();
  ASSERT_TRUE(nullptr != e1);

  // Test that an existing key is opened and its entry matches.
  disk_cache::EntryResult es2 = OpenOrCreateEntry("first");
  ASSERT_THAT(es2.net_error(), IsOk());
  ASSERT_TRUE(es2.opened());
  disk_cache::Entry* e2 = es2.ReleaseEntry();
  ASSERT_TRUE(nullptr != e2);
  ASSERT_EQ(e1, e2);

  // Test that different keys' entries are not the same.
  disk_cache::EntryResult es3 = OpenOrCreateEntry("second");
  ASSERT_THAT(es3.net_error(), IsOk());
  ASSERT_FALSE(es3.opened());
  disk_cache::Entry* e3 = es3.ReleaseEntry();
  ASSERT_TRUE(nullptr != e3);
  ASSERT_NE(e3, e1);

  // Test that a new entry can be created with the same key as a doomed entry.
  e3->Doom();
  disk_cache::EntryResult es4 = OpenOrCreateEntry("second");
  ASSERT_THAT(es4.net_error(), IsOk());
  ASSERT_FALSE(es4.opened());
  disk_cache::Entry* e4 = es4.ReleaseEntry();
  ASSERT_TRUE(nullptr != e4);
  ASSERT_NE(e4, e3);

  // Verify the expected number of entries.
  ASSERT_EQ(2, cache_->GetEntryCount());

  e1->Close();
  e2->Close();
  e3->Close();
  e4->Close();

  // Test proper cancellation of the callback. The in-memory cache is always
  // synchronous, so this isn't meaningful for it.
  if (!memory_only_) {
    TestEntryResultCompletionCallback callback;

    // Using "first" here:
    // 1) It's an existing entry, so SimpleCache can't cheat with an optimistic
    //    create.
    // 2) "second"'s creation is a cheated post-doom create one, which also
    //    makes testing trickier.
    EntryResult result =
        cache_->OpenOrCreateEntry("first", net::HIGHEST, callback.callback());
    ASSERT_EQ(net::ERR_IO_PENDING, result.net_error());
    cache_ = nullptr;

    // The callback is supposed to be cancelled, so we have to flush everything
    // to check for any trouble.
    disk_cache::FlushCacheThreadForTesting();
    RunUntilIdle();
    EXPECT_FALSE(callback.have_result());
  }
}

TEST_F(DiskCacheBackendTest, InMemoryOnlyOpenOrCreateEntry) {
  SetMemoryOnlyMode();
  BackendOpenOrCreateEntry();
}

TEST_F(DiskCacheBackendTest, MAYBE_BlockFileOpenOrCreateEntry) {
  BackendOpenOrCreateEntry();
}

TEST_F(DiskCacheBackendTest, MAYBE_SimpleOpenOrCreateEntry) {
  SetSimpleCacheMode();
  BackendOpenOrCreateEntry();
}

void DiskCacheBackendTest::BackendDeadOpenNextEntry() {
  InitCache();
  std::unique_ptr<disk_cache::Backend::Iterator> iter =
      cache_->CreateIterator();
  cache_.reset();
  EntryResult result = iter->OpenNextEntry(base::DoNothing());
  ASSERT_EQ(net::ERR_FAILED, result.net_error());
}

TEST_F(DiskCacheBackendTest, BlockFileBackendDeadOpenNextEntry) {
  BackendDeadOpenNextEntry();
}

TEST_F(DiskCacheBackendTest, SimpleBackendDeadOpenNextEntry) {
  SetSimpleCacheMode();
  BackendDeadOpenNextEntry();
}

TEST_F(DiskCacheBackendTest, InMemorySimpleBackendDeadOpenNextEntry) {
  SetMemoryOnlyMode();
  BackendDeadOpenNextEntry();
}

void DiskCacheBackendTest::BackendIteratorConcurrentDoom() {
  disk_cache::Entry* entry1 = nullptr;
  disk_cache::Entry* entry2 = nullptr;
  EXPECT_EQ(net::OK, CreateEntry("Key0", &entry1));
  EXPECT_EQ(net::OK, CreateEntry("Key1", &entry2));

  std::unique_ptr<disk_cache::Backend::Iterator> iter =
      cache_->CreateIterator();

  disk_cache::Entry* entry3 = nullptr;
  EXPECT_EQ(net::OK, OpenEntry("Key0", &entry3));

  TestEntryResultCompletionCallback cb;
  EntryResult result_iter = iter->OpenNextEntry(cb.callback());
  result_iter = cb.GetResult(std::move(result_iter));
  EXPECT_EQ(net::OK, result_iter.net_error());

  net::TestCompletionCallback cb_doom;
  int rv_doom = cache_->DoomAllEntries(cb_doom.callback());
  EXPECT_EQ(net::OK, cb_doom.GetResult(rv_doom));
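  // Backends differ in whether the enumeration can continue after a
  // DoomAllEntries, so the next OpenNextEntry is allowed to either fail or
  // succeed.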
  TestEntryResultCompletionCallback cb2;
  EntryResult result_iter2 = iter->OpenNextEntry(cb2.callback());
  result_iter2 = cb2.GetResult(std::move(result_iter2));
  EXPECT_TRUE(result_iter2.net_error() == net::ERR_FAILED ||
              result_iter2.net_error() == net::OK);

  entry1->Close();
  entry2->Close();
  entry3->Close();
}

TEST_F(DiskCacheBackendTest, BlockFileIteratorConcurrentDoom) {
  // Init in normal mode; the bug is not reproducible with kNoRandom. Still
  // need to let the test fixture know the new eviction algorithm will be on.
  CleanupCacheDir();
  SetNewEviction();
  CreateBackend(disk_cache::kNone);
  BackendIteratorConcurrentDoom();
}

TEST_F(DiskCacheBackendTest, SimpleIteratorConcurrentDoom) {
  SetSimpleCacheMode();
  InitCache();
  BackendIteratorConcurrentDoom();
}

TEST_F(DiskCacheBackendTest, InMemoryConcurrentDoom) {
  SetMemoryOnlyMode();
  InitCache();
  BackendIteratorConcurrentDoom();
}

TEST_F(DiskCacheBackendTest, EmptyCorruptSimpleCacheRecovery) {
  SetSimpleCacheMode();

  const std::string kCorruptData("corrupted");

  // Create a corrupt fake index in an otherwise empty simple cache.
  ASSERT_TRUE(base::PathExists(cache_path_));
  const base::FilePath index = cache_path_.AppendASCII("index");
  ASSERT_EQ(static_cast<int>(kCorruptData.length()),
            base::WriteFile(index, kCorruptData.data(), kCorruptData.length()));

  TestBackendResultCompletionCallback cb;

  // Simple cache should be able to recover.
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsOk());
}

TEST_F(DiskCacheBackendTest, MAYBE_NonEmptyCorruptSimpleCacheDoesNotRecover) {
  SetSimpleCacheMode();
  BackendOpenOrCreateEntry();

  const std::string kCorruptData("corrupted");

  // Corrupt the fake index file for the populated simple cache.
  ASSERT_TRUE(base::PathExists(cache_path_));
  const base::FilePath index = cache_path_.AppendASCII("index");
  ASSERT_EQ(static_cast<int>(kCorruptData.length()),
            base::WriteFile(index, kCorruptData.data(), kCorruptData.length()));

  TestBackendResultCompletionCallback cb;

  // Simple cache should not be able to recover when there are entry files.
  disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
      net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
      cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
      /*net_log=*/nullptr, cb.callback());
  rv = cb.GetResult(std::move(rv));
  EXPECT_THAT(rv.net_error, IsError(net::ERR_FAILED));
}

TEST_F(DiskCacheBackendTest, SimpleOwnershipTransferBackendDestroyRace) {
  struct CleanupContext {
    explicit CleanupContext(bool* ran_ptr) : ran_ptr(ran_ptr) {}
    ~CleanupContext() {
      *ran_ptr = true;
    }
    raw_ptr<bool> ran_ptr;
  };

  const char kKey[] = "skeleton";

  // This test was for a fix for https://crbug.com/946349, but the mechanics
  // of that failure became impossible after a follow-up API refactor. Still,
  // the timing is strange and warrants coverage; in particular this tests what
  // happens if the SimpleBackendImpl is destroyed after SimpleEntryImpl
  // decides to return an entry to the caller, but before the callback is run.
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  // Make sure create actually succeeds, not just optimistically.
  RunUntilIdle();

  bool cleanup_context_ran = false;
  auto cleanup_context = std::make_unique<CleanupContext>(&cleanup_context_ran);

  // The OpenEntry code below will find a pre-existing entry in a READY state,
  // so it will immediately post a task to return a result. Destroying the
  // backend before running the event loop again will run that callback in the
  // dead-backend state, while OpenEntry completion was still with it alive.
  EntryResult result = cache_->OpenEntry(
      kKey, net::HIGHEST,
      base::BindOnce(
          [](std::unique_ptr<CleanupContext>, EntryResult result) {
            // The callback is here for ownership of CleanupContext,
            // and it shouldn't get invoked in this test. A normal
            // one would transfer result.entry to CleanupContext.
            ADD_FAILURE() << "This should not actually run";

            // ... but if it ran, it also shouldn't see the pointer.
            EXPECT_EQ(nullptr, result.ReleaseEntry());
          },
          std::move(cleanup_context)));
  EXPECT_EQ(net::ERR_IO_PENDING, result.net_error());
  cache_.reset();

  // Give CleanupContext a chance to do its thing.
  RunUntilIdle();
  EXPECT_TRUE(cleanup_context_ran);

  entry->Close();
}

// Verify that reloading the cache will preserve indices in kNeverReset mode.
TEST_F(DiskCacheBackendTest, SimpleCacheSoftResetKeepsValues) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  DisableFirstCleanup();
  CleanupCacheDir();

  {  // Do the initial cache creation then delete the values.
    TestBackendResultCompletionCallback cb;

    // Create an initial back-end and wait for indexing.
    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
        /*net_log=*/nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // Create an entry in the cache.
    CreateKeyAndCheck(cache.get(), "key");
  }

  RunUntilIdle();

  {  // Do the second cache creation with no reset flag, preserving entries.
    TestBackendResultCompletionCallback cb;

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
        /*net_log=*/nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // The entry should be present, as a forced reset was not called for.
    EXPECT_TRUE(static_cast<disk_cache::SimpleBackendImpl*>(cache.get())
                    ->index()
                    ->Has(disk_cache::simple_util::GetEntryHashKey("key")));
  }
}

// Verify that reloading the cache will not preserve indices in Reset mode.
TEST_F(DiskCacheBackendTest, SimpleCacheHardResetDropsValues) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  DisableFirstCleanup();
  CleanupCacheDir();

  {  // Create the initial back-end.
    TestBackendResultCompletionCallback cb;

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kNeverReset,
        /*net_log=*/nullptr, cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // Create an entry in the cache.
    CreateKeyAndCheck(cache.get(), "key");
  }

  RunUntilIdle();

  {  // Re-load cache with a reset flag, which should ignore existing entries.
    TestBackendResultCompletionCallback cb;

    disk_cache::BackendResult rv = disk_cache::CreateCacheBackend(
        net::APP_CACHE, net::CACHE_BACKEND_SIMPLE, /*file_operations=*/nullptr,
        cache_path_, 0, disk_cache::ResetHandling::kReset, /*net_log=*/nullptr,
        cb.callback());
    rv = cb.GetResult(std::move(rv));
    EXPECT_THAT(rv.net_error, IsOk());
    std::unique_ptr<disk_cache::Backend> cache = std::move(rv.backend);
    ASSERT_TRUE(cache.get());
    WaitForSimpleCacheIndexAndCheck(cache.get());

    // The entry shouldn't be present, as a forced reset was called for.
    EXPECT_FALSE(static_cast<disk_cache::SimpleBackendImpl*>(cache.get())
                     ->index()
                     ->Has(disk_cache::simple_util::GetEntryHashKey("key")));

    // Add the entry back in the cache, then make sure it's present.
    CreateKeyAndCheck(cache.get(), "key");

    EXPECT_TRUE(static_cast<disk_cache::SimpleBackendImpl*>(cache.get())
                    ->index()
                    ->Has(disk_cache::simple_util::GetEntryHashKey("key")));
  }
}

// Test to make sure cancellation of a backend operation that got queued after
// a pending doom on backend destruction happens properly.
TEST_F(DiskCacheBackendTest, SimpleCancelOpPendingDoom) {
  struct CleanupContext {
    explicit CleanupContext(bool* ran_ptr) : ran_ptr(ran_ptr) {}
    ~CleanupContext() { *ran_ptr = true; }
    raw_ptr<bool> ran_ptr;
  };

  const char kKey[] = "skeleton";

  // Disable optimistic ops.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  entry->Close();

  // Queue doom.
  cache_->DoomEntry(kKey, net::LOWEST, base::DoNothing());

  // Queue create after it.
  bool cleanup_context_ran = false;
  auto cleanup_context = std::make_unique<CleanupContext>(&cleanup_context_ran);

  EntryResult entry_result = cache_->CreateEntry(
      kKey, net::HIGHEST,
      base::BindOnce(
          [](std::unique_ptr<CleanupContext>, EntryResult result) {
            ADD_FAILURE() << "This should not actually run";
          },
          std::move(cleanup_context)));

  EXPECT_EQ(net::ERR_IO_PENDING, entry_result.net_error());
  cache_.reset();

  RunUntilIdle();
  EXPECT_TRUE(cleanup_context_ran);
}

TEST_F(DiskCacheBackendTest, SimpleDontLeakPostDoomCreate) {
  // If an entry has been optimistically created after a pending doom, and the
  // backend destroyed before the doom completed, the entry would get wedged,
  // with no operations on it workable and the entry leaked.
  // (See https://crbug.com/1015774.)
  const char kKey[] = "for_lock";
  const int kBufSize = 2 * 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  entry->Close();

  // Make sure create actually succeeds, not just optimistically.
  RunUntilIdle();

  // Queue doom.
  int rv = cache_->DoomEntry(kKey, net::LOWEST, base::DoNothing());
  ASSERT_EQ(net::ERR_IO_PENDING, rv);

  // And then do a create. This actually succeeds optimistically.
  EntryResult result =
      cache_->CreateEntry(kKey, net::LOWEST, base::DoNothing());
  ASSERT_EQ(net::OK, result.net_error());
  entry = result.ReleaseEntry();

  cache_.reset();

  // The entry is still supposed to be operable. This part is needed to see
  // the bug without a leak checker.
  EXPECT_EQ(kBufSize, WriteData(entry, 1, 0, buffer.get(), kBufSize, false));

  entry->Close();

  // Should not have leaked files here.
}

TEST_F(DiskCacheBackendTest, BlockFileDelayedWriteFailureRecovery) {
  // Test that blockfile recovers appropriately when some entries are
  // in a screwed-up state due to an error in delayed writeback.
  //
  // https://crbug.com/1086727
  InitCache();

  const char kKey[] = "Key2";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  const int kBufSize = 24320;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  ASSERT_EQ(kBufSize, WriteSparseData(entry, 0, buffer.get(), kBufSize));

  // Setting the size limit artificially low injects a failure on writing back
  // data buffered above.
  SetMaxSize(4096);

  // This causes SparseControl to close the child entry corresponding to
  // the low portion of the offset space, triggering the writeback --- which
  // fails due to the space cap, and in particular fails to allocate data for
  // a stream, so it gets address 0.
  ASSERT_EQ(net::ERR_FAILED, WriteSparseData(entry, 16773118, buffer.get(), 4));

  // Now try reading the broken child. This should report an error, not
  // DCHECK.
  ASSERT_EQ(net::ERR_FAILED, ReadSparseData(entry, 4, buffer.get(), 4));

  entry->Close();
}

TEST_F(DiskCacheBackendTest, BlockFileInsertAliasing) {
  // Test for not having rankings corruption due to aliasing between iterator
  // and other ranking list copies during insertion operations.
  //
  // https://crbug.com/1156288

  // Need to disable the weird extra sync behavior to hit the bug.
  CreateBackend(disk_cache::kNone);
  SetNewEviction();  // default, but integrity check doesn't realize that.

  const char kKey[] = "Key0";
  const char kKeyA[] = "KeyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA41";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  const int kBufSize = 61188;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  net::TestCompletionCallback cb_write64;
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteSparseData(8, buffer.get(), 64, cb_write64.callback()));

  net::TestCompletionCallback cb_write61k;
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteSparseData(16773118, buffer.get(), 61188,
                                   cb_write61k.callback()));

  EXPECT_EQ(64, cb_write64.WaitForResult());
  EXPECT_EQ(61188, cb_write61k.WaitForResult());

  EXPECT_EQ(4128, WriteSparseData(entry, 2147479550, buffer.get(), 4128));

  std::unique_ptr<TestIterator> iter = CreateIterator();
  EXPECT_EQ(4128, WriteSparseData(entry, 2147479550, buffer.get(), 4128));
  EXPECT_EQ(64, WriteSparseData(entry, 8, buffer.get(), 64));

  disk_cache::Entry* itEntry1 = nullptr;
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&itEntry1));
  // These are actually child nodes for the sparse range.

  entry->Close();

  disk_cache::Entry* itEntry2 = nullptr;
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&itEntry2));

  net::TestCompletionCallback doom_cb;
  EXPECT_EQ(net::ERR_IO_PENDING, cache_->DoomAllEntries(doom_cb.callback()));

  TestEntryResultCompletionCallback cb_create1;
  disk_cache::EntryResult result =
      cache_->CreateEntry(kKey, net::HIGHEST, cb_create1.callback());
  EXPECT_EQ(net::OK, doom_cb.WaitForResult());
  result = cb_create1.WaitForResult();
  EXPECT_EQ(net::OK, result.net_error());
  entry = result.ReleaseEntry();

  disk_cache::Entry* entryA = nullptr;
  ASSERT_THAT(CreateEntry(kKeyA, &entryA), IsOk());
  entryA->Close();

  disk_cache::Entry* itEntry3 = nullptr;
  EXPECT_EQ(net::OK, iter->OpenNextEntry(&itEntry3));

  EXPECT_EQ(net::OK, DoomEntry(kKeyA));
  itEntry1->Close();
  entry->Close();
  itEntry2->Close();
  if (itEntry3)
    itEntry3->Close();
}

TEST_F(DiskCacheBackendTest, MemCacheBackwardsClock) {
  // Test to make sure that a wall clock going backwards is tolerated.

  base::SimpleTestClock clock;
  clock.SetNow(base::Time::Now());

  SetMemoryOnlyMode();
  InitCache();
  mem_cache_->SetClockForTesting(&clock);

  const int kBufSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, true);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("key1", &entry), IsOk());
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();

  clock.Advance(-base::Hours(1));

  ASSERT_THAT(CreateEntry("key2", &entry), IsOk());
  EXPECT_EQ(kBufSize, WriteData(entry, 0, 0, buffer.get(), kBufSize, false));
  entry->Close();

  EXPECT_LE(2 * kBufSize,
            CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
  EXPECT_EQ(net::OK, DoomEntriesBetween(base::Time(), base::Time::Max()));
  EXPECT_EQ(0, CalculateSizeOfEntriesBetween(base::Time(), base::Time::Max()));
  EXPECT_EQ(0, CalculateSizeOfAllEntries());
}

TEST_F(DiskCacheBackendTest, SimpleOpenOrCreateIndexError) {
  // Exercise the behavior of OpenOrCreateEntry in SimpleCache where the index
  // incorrectly claims the entry is missing. Regression test for
  // https://crbug.com/1316034
  const char kKey[] = "http://example.org";

  const int kBufSize = 256;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, /*no_nulls=*/false);

  SetSimpleCacheMode();
  InitCache();

  // Create an entry.
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  EXPECT_EQ(kBufSize, WriteData(entry, /*index=*/1, /*offset=*/0, buffer.get(),
                                /*len=*/kBufSize, /*truncate=*/false));
  entry->Close();

  // Mess up the index to say it's not there.
  simple_cache_impl_->index()->Remove(
      disk_cache::simple_util::GetEntryHashKey(kKey));

  // Reopening with OpenOrCreateEntry should still work.
  disk_cache::EntryResult result = OpenOrCreateEntry(kKey);
  ASSERT_THAT(result.net_error(), IsOk());
  ASSERT_TRUE(result.opened());
  entry = result.ReleaseEntry();
  EXPECT_EQ(kBufSize, entry->GetDataSize(/*index=*/1));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, SimpleOpenOrCreateIndexErrorOptimistic) {
  // Exercise the behavior of OpenOrCreateEntry in SimpleCache where the index
  // incorrectly claims the entry is missing and we do an optimistic create.
  // Covers a codepath adjacent to the one that caused https://crbug.com/1316034
  const char kKey[] = "http://example.org";

  SetSimpleCacheMode();
  InitCache();

  const int kBufSize = 256;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, /*no_nulls=*/false);

  // Create an entry.
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  EXPECT_EQ(kBufSize, WriteData(entry, /*index=*/1, /*offset=*/0, buffer.get(),
                                /*len=*/kBufSize, /*truncate=*/false));
  entry->Close();

  // Let all the I/O finish, so that OpenOrCreateEntry can try the optimistic
  // path.
  RunUntilIdle();

  // Mess up the index to say it's not there.
  simple_cache_impl_->index()->Remove(
      disk_cache::simple_util::GetEntryHashKey(kKey));

  // Reopening with OpenOrCreateEntry should still work, but since the backend
  // chose to be optimistic based on the index, the result should be a fresh
  // empty entry.
  disk_cache::EntryResult result = OpenOrCreateEntry(kKey);
  ASSERT_THAT(result.net_error(), IsOk());
  ASSERT_FALSE(result.opened());
  entry = result.ReleaseEntry();
  EXPECT_EQ(0, entry->GetDataSize(/*index=*/1));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, SimpleDoomAfterBackendDestruction) {
  // Test for when validating file headers/footers during close on the simple
  // backend fails. To get the header to be checked on close, there needs to be
  // a stream 2, since 0/1 are validated on open, and no other operation must
  // have happened to stream 2, since those will force it, too. A way of
  // getting the validation to fail is to perform a doom on the file after the
  // backend is destroyed, since that will truncate the files to mark them
  // invalid. See https://crbug.com/1317884
  const char kKey[] = "Key0";

  const int kBufSize = 256;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, /*no_nulls=*/false);

  SetCacheType(net::SHADER_CACHE);
  SetSimpleCacheMode();

  InitCache();
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
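  // A zero-length write, seemingly just enough to make stream 2 exist without
  // performing any further operation on it that would force its validation
  // before close.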
  EXPECT_EQ(0, WriteData(entry, /*index=*/2, /*offset=*/1, buffer.get(),
                         /*len=*/0, /*truncate=*/false));
  entry->Close();

  ASSERT_THAT(OpenEntry(kKey, &entry), IsOk());

  cache_.reset();
  simple_cache_impl_ = nullptr;  // Hygiene.

  entry->Doom();
  entry->Close();
}

void DiskCacheBackendTest::BackendValidateMigrated() {
  // Blockfile 3.0 migration test.
  DisableFirstCleanup();  // started from copied dir, not cleaned dir.
  InitCache();

  // The total size comes straight from the headers, and is expected to be 1258
  // for either set of testdata.
  EXPECT_EQ(1258, CalculateSizeOfAllEntries());
  EXPECT_EQ(1, cache_->GetEntryCount());

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(OpenEntry("https://example.org/data", &entry), IsOk());

  // Size of the actual payload.
  EXPECT_EQ(1234, entry->GetDataSize(1));

  entry->Close();
}

TEST_F(DiskCacheBackendTest, BlockfileMigrate20) {
  ASSERT_TRUE(CopyTestCache("good_2_0"));
  BackendValidateMigrated();
}

TEST_F(DiskCacheBackendTest, BlockfileMigrate21) {
  ASSERT_TRUE(CopyTestCache("good_2_1"));
  BackendValidateMigrated();
}

TEST_F(DiskCacheBackendTest, BlockfileMigrateNewEviction20) {
  ASSERT_TRUE(CopyTestCache("good_2_0"));
  SetNewEviction();
  BackendValidateMigrated();
}

TEST_F(DiskCacheBackendTest, BlockfileMigrateNewEviction21) {
  ASSERT_TRUE(CopyTestCache("good_2_1"));
  SetNewEviction();
  BackendValidateMigrated();
}