entry_unittest.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <utility>

#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/files/file.h"
#include "base/files/file_util.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/field_trial_param_associator.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/test/metrics/histogram_tester.h"
#include "base/test/scoped_feature_list.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "net/base/completion_once_callback.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/request_priority.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_cache_test_base.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/memory/mem_entry_impl.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_entry_impl.h"
#include "net/disk_cache/simple/simple_histogram_enums.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/test/gtest_util.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

using net::test::IsError;
using net::test::IsOk;

using base::Time;
using disk_cache::EntryResult;
using disk_cache::EntryResultCallback;
using disk_cache::RangeResult;
using disk_cache::ScopedEntryPtr;

// Tests that can run with different types of caches.
class DiskCacheEntryTest : public DiskCacheTestWithCache {
 public:
  void InternalSyncIOBackground(disk_cache::Entry* entry);
  void ExternalSyncIOBackground(disk_cache::Entry* entry);

 protected:
  void InternalSyncIO();
  void InternalAsyncIO();
  void ExternalSyncIO();
  void ExternalAsyncIO();
  void ReleaseBuffer(int stream_index);
  void StreamAccess();
  void GetKey();
  void GetTimes(int stream_index);
  void GrowData(int stream_index);
  void TruncateData(int stream_index);
  void ZeroLengthIO(int stream_index);
  void Buffering();
  void SizeAtCreate();
  void SizeChanges(int stream_index);
  void ReuseEntry(int size, int stream_index);
  void InvalidData(int stream_index);
  void ReadWriteDestroyBuffer(int stream_index);
  void DoomNormalEntry();
  void DoomEntryNextToOpenEntry();
  void DoomedEntry(int stream_index);
  void BasicSparseIO();
  void HugeSparseIO();
  void GetAvailableRangeTest();
  void CouldBeSparse();
  void UpdateSparseEntry();
  void DoomSparseEntry();
  void PartialSparseEntry();
  void SparseInvalidArg();
  void SparseClipEnd(int64_t max_index, bool expected_unsupported);
  bool SimpleCacheMakeBadChecksumEntry(const std::string& key, int data_size);
  bool SimpleCacheThirdStreamFileExists(const char* key);
  void SyncDoomEntry(const char* key);
  void CreateEntryWithHeaderBodyAndSideData(const std::string& key,
                                            int data_size);
  void TruncateFileFromEnd(int file_index,
                           const std::string& key,
                           int data_size,
                           int truncate_size);
  void UseAfterBackendDestruction();
  void CloseSparseAfterBackendDestruction();
  void LastUsedTimePersists();
  void TruncateBackwards();
  void ZeroWriteBackwards();
  void SparseOffset64Bit();
};
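
// Editor's note: the helpers declared above are written against the generic
// disk_cache::Entry interface, so the TEST_F wrappers below can reuse each
// one across the backend configurations provided by DiskCacheTestWithCache
// (the default blockfile cache, the memory-only cache, and other cache
// types configured via SetCacheType()).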

// This part of the test runs on the background thread.
void DiskCacheEntryTest::InternalSyncIOBackground(disk_cache::Entry* entry) {
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  EXPECT_EQ(0, entry->ReadData(0, 0, buffer1.get(), kSize1,
                               net::CompletionOnceCallback()));
  base::strlcpy(buffer1->data(), "the data", kSize1);
  EXPECT_EQ(10, entry->WriteData(0, 0, buffer1.get(), kSize1,
                                 net::CompletionOnceCallback(), false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(10, entry->ReadData(0, 0, buffer1.get(), kSize1,
                                net::CompletionOnceCallback()));
  EXPECT_STREQ("the data", buffer1->data());

  const int kSize2 = 5000;
  const int kSize3 = 10000;
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize2);
  scoped_refptr<net::IOBuffer> buffer3 =
      base::MakeRefCounted<net::IOBuffer>(kSize3);
  memset(buffer3->data(), 0, kSize3);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  EXPECT_EQ(5000, entry->WriteData(1, 1500, buffer2.get(), kSize2,
                                   net::CompletionOnceCallback(), false));
  memset(buffer2->data(), 0, kSize2);
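  // 5000 bytes were written at offset 1500, so stream 1 now ends at offset
  // 6500; a read starting at offset 1511 can therefore return at most
  // 6500 - 1511 = 4989 bytes.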
  EXPECT_EQ(4989, entry->ReadData(1, 1511, buffer2.get(), kSize2,
                                  net::CompletionOnceCallback()));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(5000, entry->ReadData(1, 0, buffer2.get(), kSize2,
                                  net::CompletionOnceCallback()));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
  EXPECT_EQ(1500, entry->ReadData(1, 5000, buffer2.get(), kSize2,
                                  net::CompletionOnceCallback()));
  EXPECT_EQ(0, entry->ReadData(1, 6500, buffer2.get(), kSize2,
                               net::CompletionOnceCallback()));
  EXPECT_EQ(6500, entry->ReadData(1, 0, buffer3.get(), kSize3,
                                  net::CompletionOnceCallback()));
  EXPECT_EQ(8192, entry->WriteData(1, 0, buffer3.get(), 8192,
                                   net::CompletionOnceCallback(), false));
  EXPECT_EQ(8192, entry->ReadData(1, 0, buffer3.get(), kSize3,
                                  net::CompletionOnceCallback()));
  EXPECT_EQ(8192, entry->GetDataSize(1));

  // We need to delete the memory buffer on this thread.
  EXPECT_EQ(0, entry->WriteData(0, 0, nullptr, 0,
                                net::CompletionOnceCallback(), true));
  EXPECT_EQ(0, entry->WriteData(1, 0, nullptr, 0,
                                net::CompletionOnceCallback(), true));
}

// We need to support synchronous IO even though it is not a supported
// operation from the point of view of the disk cache's public interface,
// because we use it internally, not just in a few tests, but as part of the
// implementation (see sparse_control.cc, for example).
void DiskCacheEntryTest::InternalSyncIO() {
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
  ASSERT_TRUE(nullptr != entry);

  // The bulk of the test runs from within the callback, on the cache thread.
  RunTaskForTest(base::BindOnce(&DiskCacheEntryTest::InternalSyncIOBackground,
                                base::Unretained(this), entry));

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, InternalSyncIO) {
  InitCache();
  InternalSyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyInternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalSyncIO();
}

void DiskCacheEntryTest::InternalAsyncIO() {
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
  ASSERT_TRUE(nullptr != entry);

  // Avoid using internal buffers for the test. We have to write something to
  // the entry and close it so that we flush the internal buffer to disk. After
  // that, IO operations will be really hitting the disk. We don't care about
  // the content, so just extending the entry is enough (all extensions zero-
  // fill any holes).
  EXPECT_EQ(0, WriteData(entry, 0, 15 * 1024, nullptr, 0, false));
  EXPECT_EQ(0, WriteData(entry, 1, 15 * 1024, nullptr, 0, false));
  entry->Close();
  ASSERT_THAT(OpenEntry("the first key", &entry), IsOk());

  MessageLoopHelper helper;
  // Let's verify that each IO goes to the right callback object.
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);
  CallbackTest callback6(&helper, false);
  CallbackTest callback7(&helper, false);
  CallbackTest callback8(&helper, false);
  CallbackTest callback9(&helper, false);
  CallbackTest callback10(&helper, false);
  CallbackTest callback11(&helper, false);
  CallbackTest callback12(&helper, false);
  CallbackTest callback13(&helper, false);

  const int kSize1 = 10;
  const int kSize2 = 5000;
  const int kSize3 = 10000;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize2);
  scoped_refptr<net::IOBuffer> buffer3 =
      base::MakeRefCounted<net::IOBuffer>(kSize3);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);

  EXPECT_EQ(0, entry->ReadData(0, 15 * 1024, buffer1.get(), kSize1,
                               base::BindOnce(&CallbackTest::Run,
                                              base::Unretained(&callback1))));
  base::strlcpy(buffer1->data(), "the data", kSize1);
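
  // Each operation below may complete synchronously (returning the byte
  // count) or asynchronously (returning net::ERR_IO_PENDING); |expected|
  // counts the pending completions that WaitUntilCacheIoFinished() must
  // observe before returning.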
  int expected = 0;
  int ret = entry->WriteData(
      0, 0, buffer1.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback2)), false);
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer2->data(), 0, kSize2);
  ret = entry->ReadData(
      0, 0, buffer2.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback3)));
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer2->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  ret = entry->WriteData(
      1, 1500, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback4)), true);
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer3->data(), 0, kSize3);
  ret = entry->ReadData(
      1, 1511, buffer3.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback5)));
  EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());

  ret = entry->ReadData(
      1, 0, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback6)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  memset(buffer3->data(), 0, kSize3);
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));

  ret = entry->ReadData(
      1, 5000, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback7)));
  EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->ReadData(
      1, 0, buffer3.get(), kSize3,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback9)));
  EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->WriteData(
      1, 0, buffer3.get(), 8192,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback10)), true);
  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  ret = entry->ReadData(
      1, 0, buffer3.get(), kSize3,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback11)));
  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_EQ(8192, entry->GetDataSize(1));

  ret = entry->ReadData(
      0, 0, buffer1.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback12)));
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->ReadData(
      1, 0, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback13)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  EXPECT_FALSE(helper.callback_reused_error());

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, InternalAsyncIO) {
  InitCache();
  InternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalAsyncIO();
}
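
// Editor's note: the "External" variants below use buffers of 17000 and
// 25000 bytes. In the blockfile backend, streams this large no longer fit
// in the shared block files and are stored in separate ("external") files,
// which is the code path these tests exercise.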

// This part of the test runs on the background thread.
void DiskCacheEntryTest::ExternalSyncIOBackground(disk_cache::Entry* entry) {
  const int kSize1 = 17000;
  const int kSize2 = 25000;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize2);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  EXPECT_EQ(17000, entry->WriteData(0, 0, buffer1.get(), kSize1,
                                    net::CompletionOnceCallback(), false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(17000, entry->ReadData(0, 0, buffer1.get(), kSize1,
                                   net::CompletionOnceCallback()));
  EXPECT_STREQ("the data", buffer1->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  EXPECT_EQ(25000, entry->WriteData(1, 10000, buffer2.get(), kSize2,
                                    net::CompletionOnceCallback(), false));
  memset(buffer2->data(), 0, kSize2);
  EXPECT_EQ(24989, entry->ReadData(1, 10011, buffer2.get(), kSize2,
                                   net::CompletionOnceCallback()));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(25000, entry->ReadData(1, 0, buffer2.get(), kSize2,
                                   net::CompletionOnceCallback()));
  EXPECT_EQ(5000, entry->ReadData(1, 30000, buffer2.get(), kSize2,
                                  net::CompletionOnceCallback()));
  EXPECT_EQ(0, entry->ReadData(1, 35000, buffer2.get(), kSize2,
                               net::CompletionOnceCallback()));
  EXPECT_EQ(17000, entry->ReadData(1, 0, buffer1.get(), kSize1,
                                   net::CompletionOnceCallback()));
  EXPECT_EQ(17000, entry->WriteData(1, 20000, buffer1.get(), kSize1,
                                    net::CompletionOnceCallback(), false));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  // We need to delete the memory buffer on this thread.
  EXPECT_EQ(0, entry->WriteData(0, 0, nullptr, 0,
                                net::CompletionOnceCallback(), true));
  EXPECT_EQ(0, entry->WriteData(1, 0, nullptr, 0,
                                net::CompletionOnceCallback(), true));
}

void DiskCacheEntryTest::ExternalSyncIO() {
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());

  // The bulk of the test runs from within the callback, on the cache thread.
  RunTaskForTest(base::BindOnce(&DiskCacheEntryTest::ExternalSyncIOBackground,
                                base::Unretained(this), entry));

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, ExternalSyncIO) {
  InitCache();
  ExternalSyncIO();
}
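
// Editor's note: the "NoBuffer" variants set the blockfile backend's
// kNoBuffering flag, which disables its internal user-data buffering so
// reads and writes go to the underlying files directly (the same IO paths,
// minus the buffering layer).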

TEST_F(DiskCacheEntryTest, ExternalSyncIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ExternalSyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyExternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalSyncIO();
}

void DiskCacheEntryTest::ExternalAsyncIO() {
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());

  int expected = 0;

  MessageLoopHelper helper;
  // Let's verify that each IO goes to the right callback object.
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);
  CallbackTest callback6(&helper, false);
  CallbackTest callback7(&helper, false);
  CallbackTest callback8(&helper, false);
  CallbackTest callback9(&helper, false);

  const int kSize1 = 17000;
  const int kSize2 = 25000;
  const int kSize3 = 25000;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize2);
  scoped_refptr<net::IOBuffer> buffer3 =
      base::MakeRefCounted<net::IOBuffer>(kSize3);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);

  base::strlcpy(buffer1->data(), "the data", kSize1);
  int ret = entry->WriteData(
      0, 0, buffer1.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback1)), false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer2->data(), 0, kSize1);
  ret = entry->ReadData(
      0, 0, buffer2.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback2)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer2->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  ret = entry->WriteData(
      1, 10000, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback3)), false);
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer3->data(), 0, kSize3);
  ret = entry->ReadData(
      1, 10011, buffer3.get(), kSize3,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback4)));
  EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());

  ret = entry->ReadData(
      1, 0, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback5)));
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer3->data(), 0, kSize3);
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 10000));

  ret = entry->ReadData(
      1, 30000, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback6)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->ReadData(
      1, 35000, buffer2.get(), kSize2,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback7)));
  EXPECT_TRUE(0 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->ReadData(
      1, 0, buffer1.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback8)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->WriteData(
      1, 20000, buffer3.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback9)), false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  EXPECT_FALSE(helper.callback_reused_error());

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, ExternalAsyncIO) {
  InitCache();
  ExternalAsyncIO();
}

// TODO(http://crbug.com/497101): This test is flaky.
#if BUILDFLAG(IS_IOS)
#define MAYBE_ExternalAsyncIONoBuffer DISABLED_ExternalAsyncIONoBuffer
#else
#define MAYBE_ExternalAsyncIONoBuffer ExternalAsyncIONoBuffer
#endif
TEST_F(DiskCacheEntryTest, MAYBE_ExternalAsyncIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ExternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalAsyncIO();
}

// Tests that IOBuffers are not referenced after IO completes.
void DiskCacheEntryTest::ReleaseBuffer(int stream_index) {
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
  ASSERT_TRUE(nullptr != entry);

  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufferSize);
  CacheTestFillBuffer(buffer->data(), kBufferSize, false);
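  // ReleaseBufferCompletionCallback is a net test helper that, when it
  // delivers the result, checks the IOBuffer it was given is no longer
  // referenced by the cache (the callback holds the last reference), which
  // is the property this test is about.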
  net::ReleaseBufferCompletionCallback cb(buffer.get());
  int rv = entry->WriteData(stream_index, 0, buffer.get(), kBufferSize,
                            cb.callback(), false);
  EXPECT_EQ(kBufferSize, cb.GetResult(rv));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, ReleaseBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ReleaseBuffer(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyReleaseBuffer) {
  SetMemoryOnlyMode();
  InitCache();
  ReleaseBuffer(0);
}

void DiskCacheEntryTest::StreamAccess() {
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
  ASSERT_TRUE(nullptr != entry);

  const int kBufferSize = 1024;
  const int kNumStreams = 3;
  scoped_refptr<net::IOBuffer> reference_buffers[kNumStreams];
  for (auto& reference_buffer : reference_buffers) {
    reference_buffer = base::MakeRefCounted<net::IOBuffer>(kBufferSize);
    CacheTestFillBuffer(reference_buffer->data(), kBufferSize, false);
  }
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kBufferSize);
  for (int i = 0; i < kNumStreams; i++) {
    EXPECT_EQ(
        kBufferSize,
        WriteData(entry, i, 0, reference_buffers[i].get(), kBufferSize, false));
    memset(buffer1->data(), 0, kBufferSize);
    EXPECT_EQ(kBufferSize, ReadData(entry, i, 0, buffer1.get(), kBufferSize));
    EXPECT_EQ(
        0, memcmp(reference_buffers[i]->data(), buffer1->data(), kBufferSize));
  }
  EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
            ReadData(entry, kNumStreams, 0, buffer1.get(), kBufferSize));
  entry->Close();

  // Open the entry and read it in chunks, including a read past the end.
  ASSERT_THAT(OpenEntry("the first key", &entry), IsOk());
  ASSERT_TRUE(nullptr != entry);

  const int kReadBufferSize = 600;
  const int kFinalReadSize = kBufferSize - kReadBufferSize;
  static_assert(kFinalReadSize < kReadBufferSize,
                "should be exactly two reads");
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kReadBufferSize);
  for (int i = 0; i < kNumStreams; i++) {
    memset(buffer2->data(), 0, kReadBufferSize);
    EXPECT_EQ(kReadBufferSize,
              ReadData(entry, i, 0, buffer2.get(), kReadBufferSize));
    EXPECT_EQ(
        0,
        memcmp(reference_buffers[i]->data(), buffer2->data(), kReadBufferSize));
    memset(buffer2->data(), 0, kReadBufferSize);
    EXPECT_EQ(
        kFinalReadSize,
        ReadData(entry, i, kReadBufferSize, buffer2.get(), kReadBufferSize));
    EXPECT_EQ(0,
              memcmp(reference_buffers[i]->data() + kReadBufferSize,
                     buffer2->data(),
                     kFinalReadSize));
  }
  entry->Close();
}

TEST_F(DiskCacheEntryTest, StreamAccess) {
  InitCache();
  StreamAccess();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyStreamAccess) {
  SetMemoryOnlyMode();
  InitCache();
  StreamAccess();
}

void DiskCacheEntryTest::GetKey() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_EQ(key, entry->GetKey()) << "short key";
  entry->Close();
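
  // The remaining cases probe progressively longer keys (1000 bytes, 3000
  // bytes, ~20000 bytes, 16 KB). In the blockfile backend these sizes move
  // the key from inline storage in the entry record into block files or a
  // separate external file, and GetKey() must recover it from each.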
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);
  char key_buffer[20000];

  CacheTestFillBuffer(key_buffer, 3000, true);
  key_buffer[1000] = '\0';

  key = key_buffer;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_TRUE(key == entry->GetKey()) << "1000 bytes key";
  entry->Close();

  key_buffer[1000] = 'p';
  key_buffer[3000] = '\0';
  key = key_buffer;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_TRUE(key == entry->GetKey()) << "medium size key";
  entry->Close();

  CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
  key_buffer[19999] = '\0';
  key = key_buffer;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_TRUE(key == entry->GetKey()) << "long key";
  entry->Close();

  CacheTestFillBuffer(key_buffer, 0x4000, true);
  key_buffer[0x4000] = '\0';
  key = key_buffer;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_TRUE(key == entry->GetKey()) << "16KB key";
  entry->Close();
}

TEST_F(DiskCacheEntryTest, GetKey) {
  InitCache();
  GetKey();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGetKey) {
  SetMemoryOnlyMode();
  InitCache();
  GetKey();
}

void DiskCacheEntryTest::GetTimes(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;

  Time t1 = Time::Now();
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_TRUE(entry->GetLastModified() >= t1);
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  AddDelay();
  Time t2 = Time::Now();
  EXPECT_TRUE(t2 > t1);
  EXPECT_EQ(0, WriteData(entry, stream_index, 200, nullptr, 0, false));
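  // The branches below encode per-cache-type timestamp policy: APP_CACHE
  // entries skip the timestamp updates that ordinary entries get on both
  // writes and reads, while SHADER_CACHE entries skip the update on reads.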
  if (type_ == net::APP_CACHE) {
    EXPECT_TRUE(entry->GetLastModified() < t2);
  } else {
    EXPECT_TRUE(entry->GetLastModified() >= t2);
  }
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  AddDelay();
  Time t3 = Time::Now();
  EXPECT_TRUE(t3 > t2);
  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 0, buffer.get(), kSize));
  if (type_ == net::APP_CACHE) {
    EXPECT_TRUE(entry->GetLastUsed() < t2);
    EXPECT_TRUE(entry->GetLastModified() < t2);
  } else if (type_ == net::SHADER_CACHE) {
    EXPECT_TRUE(entry->GetLastUsed() < t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  } else {
    EXPECT_TRUE(entry->GetLastUsed() >= t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  }
  entry->Close();
}

TEST_F(DiskCacheEntryTest, GetTimes) {
  InitCache();
  GetTimes(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGetTimes) {
  SetMemoryOnlyMode();
  InitCache();
  GetTimes(0);
}

TEST_F(DiskCacheEntryTest, AppCacheGetTimes) {
  SetCacheType(net::APP_CACHE);
  InitCache();
  GetTimes(0);
}

TEST_F(DiskCacheEntryTest, ShaderCacheGetTimes) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  GetTimes(0);
}

void DiskCacheEntryTest::GrowData(int stream_index) {
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key1, &entry), IsOk());

  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  memset(buffer2->data(), 0, kSize);

  base::strlcpy(buffer1->data(), "the data", kSize);
  EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, ReadData(entry, stream_index, 0, buffer2.get(), 10));
  EXPECT_STREQ("the data", buffer2->data());
  EXPECT_EQ(10, entry->GetDataSize(stream_index));

  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));

  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  memset(buffer2->data(), 0, kSize);
  std::string key2("Second key");
  ASSERT_THAT(CreateEntry(key2, &entry), IsOk());
  EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, entry->GetDataSize(stream_index));
  entry->Close();

  // Go from an internal address to a bigger block size.
  ASSERT_THAT(OpenEntry(key2, &entry), IsOk());
  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
  entry->Close();
  memset(buffer2->data(), 0, kSize);

  // Go from an internal address to an external one.
  ASSERT_THAT(OpenEntry(key2, &entry), IsOk());
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  // Double check the size from disk.
  ASSERT_THAT(OpenEntry(key2, &entry), IsOk());
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));

  // Now extend the entry without actual data.
  EXPECT_EQ(0, WriteData(entry, stream_index, 45500, buffer1.get(), 0, false));
  entry->Close();

  // And check again from disk.
  ASSERT_THAT(OpenEntry(key2, &entry), IsOk());
  EXPECT_EQ(45500, entry->GetDataSize(stream_index));
  entry->Close();
}
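
// Editor's note: in the blockfile backend, the 10 -> 2000 -> 20000 byte
// progression above walks a stream from a small internal block, to a larger
// block-file allocation, to a standalone external file; the second half
// repeats the same growth across Close()/OpenEntry() cycles to check that
// each transition is persisted correctly.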

TEST_F(DiskCacheEntryTest, GrowData) {
  InitCache();
  GrowData(0);
}

TEST_F(DiskCacheEntryTest, GrowDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  GrowData(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGrowData) {
  SetMemoryOnlyMode();
  InitCache();
  GrowData(0);
}

void DiskCacheEntryTest::TruncateData(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize2);

  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);
  773. // Simple truncation:
  774. EXPECT_EQ(200, WriteData(entry, stream_index, 0, buffer1.get(), 200, false));
  775. EXPECT_EQ(200, entry->GetDataSize(stream_index));
  776. EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, false));
  777. EXPECT_EQ(200, entry->GetDataSize(stream_index));
  778. EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, true));
  779. EXPECT_EQ(100, entry->GetDataSize(stream_index));
  780. EXPECT_EQ(0, WriteData(entry, stream_index, 50, buffer1.get(), 0, true));
  781. EXPECT_EQ(50, entry->GetDataSize(stream_index));
  782. EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
  783. EXPECT_EQ(0, entry->GetDataSize(stream_index));
  784. entry->Close();
  785. ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  786. // Go to an external file.
  787. EXPECT_EQ(20000,
  788. WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
  789. EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  790. EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), 20000));
  791. EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000));
  792. memset(buffer2->data(), 0, kSize2);
  793. // External file truncation
  794. EXPECT_EQ(18000,
  795. WriteData(entry, stream_index, 0, buffer1.get(), 18000, false));
  796. EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  797. EXPECT_EQ(18000,
  798. WriteData(entry, stream_index, 0, buffer1.get(), 18000, true));
  799. EXPECT_EQ(18000, entry->GetDataSize(stream_index));
  800. EXPECT_EQ(0, WriteData(entry, stream_index, 17500, buffer1.get(), 0, true));
  801. EXPECT_EQ(17500, entry->GetDataSize(stream_index));
  802. // And back to an internal block.
  803. EXPECT_EQ(600,
  804. WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
  805. EXPECT_EQ(1600, entry->GetDataSize(stream_index));
  806. EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer2.get(), 600));
  807. EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600));
  808. EXPECT_EQ(1000, ReadData(entry, stream_index, 0, buffer2.get(), 1000));
  809. EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000))
  810. << "Preserves previous data";
  811. // Go from external file to zero length.
  812. EXPECT_EQ(20000,
  813. WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
  814. EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  815. EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
  816. EXPECT_EQ(0, entry->GetDataSize(stream_index));
  817. entry->Close();
  818. }
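
// As the expectations above encode: a write with truncate == false never
// shrinks the stream (rewriting 100 bytes of a 200-byte stream leaves the
// size at 200), while truncate == true clips the stream to exactly
// offset + length, all the way down to zero.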
TEST_F(DiskCacheEntryTest, TruncateData) {
  InitCache();
  TruncateData(0);
}

TEST_F(DiskCacheEntryTest, TruncateDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  TruncateData(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateData) {
  SetMemoryOnlyMode();
  InitCache();
  TruncateData(0);
}

void DiskCacheEntryTest::ZeroLengthIO(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  EXPECT_EQ(0, ReadData(entry, stream_index, 0, nullptr, 0));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, nullptr, 0, false));

  // This write should extend the entry.
  EXPECT_EQ(0, WriteData(entry, stream_index, 1000, nullptr, 0, false));
  EXPECT_EQ(0, ReadData(entry, stream_index, 500, nullptr, 0));
  EXPECT_EQ(0, ReadData(entry, stream_index, 2000, nullptr, 0));
  EXPECT_EQ(1000, entry->GetDataSize(stream_index));

  EXPECT_EQ(0, WriteData(entry, stream_index, 100000, nullptr, 0, true));
  EXPECT_EQ(0, ReadData(entry, stream_index, 50000, nullptr, 0));
  EXPECT_EQ(100000, entry->GetDataSize(stream_index));

  // Let's verify the actual content.
  const int kSize = 20;
  const char zeros[kSize] = {};
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 500, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 5000, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 50000, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
  entry->Close();
}
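
// Zero-length writes are the mechanism used to extend or truncate a stream
// without supplying data; the reads above confirm that ranges created this
// way come back zero-filled instead of exposing stale buffer contents.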
TEST_F(DiskCacheEntryTest, ZeroLengthIO) {
  InitCache();
  ZeroLengthIO(0);
}

TEST_F(DiskCacheEntryTest, ZeroLengthIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ZeroLengthIO(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyZeroLengthIO) {
  SetMemoryOnlyMode();
  InitCache();
  ZeroLengthIO(0);
}

// Tests that we handle the content correctly when buffering, a feature of the
// standard cache that permits fast responses to certain reads.
void DiskCacheEntryTest::Buffering() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  // Write a little more and read what we wrote before.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 1, 5000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Now go to an external file.
  EXPECT_EQ(kSize, WriteData(entry, 1, 18000, buffer1.get(), kSize, false));
  entry->Close();

  // Write something else and verify old data.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 1, 10000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 5000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Extend the file some more.
  EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, false));
  entry->Close();

  // And now make sure that we can deal with data in both places (ram/disk).
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, false));

  // We should not overwrite the data at 18000 with this.
  EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 17000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  EXPECT_EQ(kSize, WriteData(entry, 1, 22900, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23100, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  // Extend the file again and read back without closing the entry.
  EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 45000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 25000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 45000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, Buffering) {
  InitCache();
  Buffering();
}

TEST_F(DiskCacheEntryTest, BufferingNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  Buffering();
}

// Checks that entries are zero length when created.
void DiskCacheEntryTest::SizeAtCreate() {
  const char key[] = "the first key";
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kNumStreams = 3;
  for (int i = 0; i < kNumStreams; ++i)
    EXPECT_EQ(0, entry->GetDataSize(i));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, SizeAtCreate) {
  InitCache();
  SizeAtCreate();
}

TEST_F(DiskCacheEntryTest, MemoryOnlySizeAtCreate) {
  SetMemoryOnlyMode();
  InitCache();
  SizeAtCreate();
}

// Some extra tests to make sure that buffering works properly when changing
// the entry size.
void DiskCacheEntryTest::SizeChanges(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 200;
  const char zeros[kSize] = {};
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, true));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 17000, buffer1.get(), kSize, true));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 23000, buffer1.get(), kSize, true));
  entry->Close();

  // Extend the file and read between the old size and the new write.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(23000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 24000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, kSize));

  // Read at the end of the old file size.
  EXPECT_EQ(
      kSize,
      ReadData(entry, stream_index, 23000 + kSize - 35, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 35, 35));

  // Read slightly before the last write.
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 24900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // Extend the entry a little more.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 26000, buffer1.get(), kSize, true));
  EXPECT_EQ(26000 + kSize, entry->GetDataSize(stream_index));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 25900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // And now reduce the size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(
      28,
      ReadData(entry, stream_index, 25000 + kSize - 28, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 28, 28));

  // Reduce the size with a buffer that is not extending the size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 24000, buffer1.get(), kSize, false));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 24500, buffer1.get(), kSize, true));
  EXPECT_EQ(24500 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 23900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // And now reduce the size below the old size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 19000, buffer1.get(), kSize, true));
  EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 18900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // Verify that the actual file is truncated.
  entry->Close();
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));

  // Extend the newly opened file with a zero length write, expect zero fill.
  EXPECT_EQ(
      0,
      WriteData(entry, stream_index, 20000 + kSize, buffer1.get(), 0, false));
  EXPECT_EQ(kSize,
            ReadData(entry, stream_index, 19000 + kSize, buffer1.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), zeros, kSize));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, SizeChanges) {
  InitCache();
  SizeChanges(1);
}

TEST_F(DiskCacheEntryTest, SizeChangesNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  SizeChanges(1);
}

// Write more than the total cache capacity, but to a single entry. |size| is
// the number of bytes to write each time.
void DiskCacheEntryTest::ReuseEntry(int size, int stream_index) {
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key1, &entry), IsOk());
  entry->Close();

  std::string key2("the second key");
  ASSERT_THAT(CreateEntry(key2, &entry), IsOk());

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(size);
  CacheTestFillBuffer(buffer->data(), size, false);

  for (int i = 0; i < 15; i++) {
    EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer.get(), 0, true));
    EXPECT_EQ(size,
              WriteData(entry, stream_index, 0, buffer.get(), size, false));
    entry->Close();
    ASSERT_THAT(OpenEntry(key2, &entry), IsOk());
  }

  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key1, &entry)) << "have not evicted this entry";
  entry->Close();
}
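
// The loop above pushes 15 * |size| bytes through a single entry, more than
// the maximum cache size configured by the callers below; the final
// OpenEntry(key1) check verifies that rewriting one entry in place does not
// evict its neighbor.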
TEST_F(DiskCacheEntryTest, ReuseExternalEntry) {
  SetMaxSize(200 * 1024);
  InitCache();
  ReuseEntry(20 * 1024, 0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyReuseExternalEntry) {
  SetMemoryOnlyMode();
  SetMaxSize(200 * 1024);
  InitCache();
  ReuseEntry(20 * 1024, 0);
}

TEST_F(DiskCacheEntryTest, ReuseInternalEntry) {
  SetMaxSize(100 * 1024);
  InitCache();
  ReuseEntry(10 * 1024, 0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyReuseInternalEntry) {
  SetMemoryOnlyMode();
  SetMaxSize(100 * 1024);
  InitCache();
  ReuseEntry(10 * 1024, 0);
}

// Reading somewhere that was not written should return zeros.
void DiskCacheEntryTest::InvalidData(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  const int kSize3 = 20000;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize2);
  scoped_refptr<net::IOBuffer> buffer3 =
      base::MakeRefCounted<net::IOBuffer>(kSize3);

  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple data grow:
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 400, buffer1.get(), 200, false));
  EXPECT_EQ(600, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 300, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry->Close();
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  // The entry is now on disk. Load it and extend it.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 800, buffer1.get(), 200, false));
  EXPECT_EQ(1000, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 700, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry->Close();
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  // This time using truncate.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 1800, buffer1.get(), 200, true));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 1500, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));

  // Go to an external file.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 19800, buffer1.get(), 200, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(4000, ReadData(entry, stream_index, 14000, buffer3.get(), 4000));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000));

  // And back to an internal block.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
  EXPECT_EQ(1600, entry->GetDataSize(stream_index));
  EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer3.get(), 600));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600));

  // Extend it again.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 2000, buffer1.get(), 600, false));
  EXPECT_EQ(2600, entry->GetDataSize(stream_index));
  EXPECT_EQ(200, ReadData(entry, stream_index, 1800, buffer3.get(), 200));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  // And again (with truncation flag).
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 3000, buffer1.get(), 600, true));
  EXPECT_EQ(3600, entry->GetDataSize(stream_index));
  EXPECT_EQ(200, ReadData(entry, stream_index, 2800, buffer3.get(), 200));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, InvalidData) {
  InitCache();
  InvalidData(0);
}

TEST_F(DiskCacheEntryTest, InvalidDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  InvalidData(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) {
  SetMemoryOnlyMode();
  InitCache();
  InvalidData(0);
}

// Tests that the cache keeps a pending operation's buffer alive even after
// the caller releases its reference.
void DiskCacheEntryTest::ReadWriteDestroyBuffer(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  net::TestCompletionCallback cb;
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(
                stream_index, 0, buffer.get(), kSize, cb.callback(), false));

  // Release our reference to the buffer.
  buffer = nullptr;
  EXPECT_EQ(kSize, cb.WaitForResult());

  // And now test with a Read().
  buffer = base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->ReadData(stream_index, 0, buffer.get(), kSize, cb.callback()));
  buffer = nullptr;
  EXPECT_EQ(kSize, cb.WaitForResult());
  entry->Close();
}

TEST_F(DiskCacheEntryTest, ReadWriteDestroyBuffer) {
  InitCache();
  ReadWriteDestroyBuffer(0);
}

void DiskCacheEntryTest::DoomNormalEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Doom();
  entry->Close();

  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, true);
  buffer->data()[19999] = '\0';

  key = buffer->data();
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  EXPECT_EQ(20000, WriteData(entry, 1, 0, buffer.get(), kSize, false));
  entry->Doom();
  entry->Close();

  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
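
// The second entry above is doomed while it still has two 20000-byte streams
// and a key of almost 20000 characters; the count dropping to zero confirms
// that dooming reclaims the entry no matter how much data hangs off it.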
TEST_F(DiskCacheEntryTest, DoomEntry) {
  InitCache();
  DoomNormalEntry();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyDoomEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomNormalEntry();
}

// Tests dooming an entry that's linked to an open entry.
void DiskCacheEntryTest::DoomEntryNextToOpenEntry() {
  disk_cache::Entry* entry1;
  disk_cache::Entry* entry2;
  ASSERT_THAT(CreateEntry("fixed", &entry1), IsOk());
  entry1->Close();
  ASSERT_THAT(CreateEntry("foo", &entry1), IsOk());
  entry1->Close();
  ASSERT_THAT(CreateEntry("bar", &entry1), IsOk());
  entry1->Close();

  ASSERT_THAT(OpenEntry("foo", &entry1), IsOk());
  ASSERT_THAT(OpenEntry("bar", &entry2), IsOk());
  entry2->Doom();
  entry2->Close();

  ASSERT_THAT(OpenEntry("foo", &entry2), IsOk());
  entry2->Doom();
  entry2->Close();
  entry1->Close();

  ASSERT_THAT(OpenEntry("fixed", &entry1), IsOk());
  entry1->Close();
}

TEST_F(DiskCacheEntryTest, DoomEntryNextToOpenEntry) {
  InitCache();
  DoomEntryNextToOpenEntry();
}

TEST_F(DiskCacheEntryTest, NewEvictionDoomEntryNextToOpenEntry) {
  SetNewEviction();
  InitCache();
  DoomEntryNextToOpenEntry();
}

TEST_F(DiskCacheEntryTest, AppCacheDoomEntryNextToOpenEntry) {
  SetCacheType(net::APP_CACHE);
  InitCache();
  DoomEntryNextToOpenEntry();
}

// Verify that basic operations work as expected with doomed entries.
void DiskCacheEntryTest::DoomedEntry(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Doom();

  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
  Time initial = Time::Now();
  AddDelay();

  const int kSize1 = 2000;
  const int kSize2 = 2000;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize2);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1));
  EXPECT_EQ(key, entry->GetKey());
  EXPECT_TRUE(initial < entry->GetLastModified());
  EXPECT_TRUE(initial < entry->GetLastUsed());
  entry->Close();
}
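
// A doomed entry thus remains fully functional through its open handle
// (reads, writes, key and timestamps all behave normally) even though it has
// already been dropped from the index, as the zero entry count shows.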
TEST_F(DiskCacheEntryTest, DoomedEntry) {
  InitCache();
  DoomedEntry(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomedEntry(0);
}

// Tests that we discard entries if the data is missing.
TEST_F(DiskCacheEntryTest, MissingData) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  // Write to an external file.
  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  FlushQueueForTest();
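
  // 0x80000001 is assumed to address the first external data file: the high
  // bit marks the address as initialized, and a zero file-type field denotes
  // a standalone file rather than a block-file slot.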
  disk_cache::Addr address(0x80000001);
  base::FilePath name = cache_impl_->GetFileName(address);
  EXPECT_TRUE(base::DeleteFile(name));

  // Attempt to read the data.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(net::ERR_FILE_NOT_FOUND,
            ReadData(entry, 0, 0, buffer.get(), kSize));
  entry->Close();

  // The entry should be gone.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
}

// Test that child entries in a memory cache backend are not visible from
// enumerations.
TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 4096;
  scoped_refptr<net::IOBuffer> buf = base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* parent_entry;
  ASSERT_THAT(CreateEntry(key, &parent_entry), IsOk());

  // Writes to the parent entry.
  EXPECT_EQ(kSize, parent_entry->WriteSparseData(
                       0, buf.get(), kSize, net::CompletionOnceCallback()));

  // This write creates a child entry and writes to it.
  EXPECT_EQ(kSize, parent_entry->WriteSparseData(
                       8192, buf.get(), kSize, net::CompletionOnceCallback()));
  parent_entry->Close();

  // Perform the enumerations.
  std::unique_ptr<TestIterator> iter = CreateIterator();
  disk_cache::Entry* entry = nullptr;
  int count = 0;
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(entry != nullptr);
    ++count;
    disk_cache::MemEntryImpl* mem_entry =
        reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
    EXPECT_EQ(disk_cache::MemEntryImpl::EntryType::kParent, mem_entry->type());
    mem_entry->Close();
  }
  EXPECT_EQ(1, count);
}
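
// In the memory backend, sparse data past the first chunk lives in hidden
// child entries hanging off the parent; the loop above leans on the iterator
// returning only entries whose type() is kParent, so the child created by the
// write at offset 8192 is never enumerated.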

// Writes |buf_1| to |offset| and reads it back as |buf_2|.
void VerifySparseIO(disk_cache::Entry* entry,
                    int64_t offset,
                    net::IOBuffer* buf_1,
                    int size,
                    net::IOBuffer* buf_2) {
  net::TestCompletionCallback cb;

  memset(buf_2->data(), 0, size);
  int ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
  EXPECT_EQ(0, cb.GetResult(ret));

  ret = entry->WriteSparseData(offset, buf_1, size, cb.callback());
  EXPECT_EQ(size, cb.GetResult(ret));

  ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
  EXPECT_EQ(size, cb.GetResult(ret));
  EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size));
}
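
// Note the first read in VerifySparseIO: expecting 0 bytes doubles as a check
// that the target range was empty before the write took place.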

// Reads |size| bytes from |entry| at |offset| and verifies that they are the
// same as the content of the provided |buffer|.
void VerifyContentSparseIO(disk_cache::Entry* entry,
                           int64_t offset,
                           char* buffer,
                           int size) {
  net::TestCompletionCallback cb;

  scoped_refptr<net::IOBuffer> buf_1 =
      base::MakeRefCounted<net::IOBuffer>(size);
  memset(buf_1->data(), 0, size);
  int ret = entry->ReadSparseData(offset, buf_1.get(), size, cb.callback());
  EXPECT_EQ(size, cb.GetResult(ret));
  EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size));
}

void DiskCacheEntryTest::BasicSparseIO() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 2048;
  scoped_refptr<net::IOBuffer> buf_1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buf_2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0.
  VerifySparseIO(entry, 0, buf_1.get(), kSize, buf_2.get());
  // Write at offset 0x400000 (4 MB).
  VerifySparseIO(entry, 0x400000, buf_1.get(), kSize, buf_2.get());
  // Write at offset 0x800000000 (32 GB).
  VerifySparseIO(entry, 0x800000000LL, buf_1.get(), kSize, buf_2.get());
  entry->Close();

  // Check everything again.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize);
  entry->Close();
}

TEST_F(DiskCacheEntryTest, BasicSparseIO) {
  InitCache();
  BasicSparseIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseIO) {
  SetMemoryOnlyMode();
  InitCache();
  BasicSparseIO();
}

void DiskCacheEntryTest::HugeSparseIO() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  // Write 1.2 MB so that we cover multiple entries.
  const int kSize = 1200 * 1024;
  scoped_refptr<net::IOBuffer> buf_1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buf_2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  VerifySparseIO(entry, 0x20F0000, buf_1.get(), kSize, buf_2.get());
  entry->Close();

  // Check it again.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize);
  entry->Close();
}

TEST_F(DiskCacheEntryTest, HugeSparseIO) {
  InitCache();
  HugeSparseIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseIO) {
  SetMemoryOnlyMode();
  InitCache();
  HugeSparseIO();
}

void DiskCacheEntryTest::GetAvailableRangeTest() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 16 * 1024;
  scoped_refptr<net::IOBuffer> buf = base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F4400, buf.get(), kSize));

  // We stop at the first empty block.
  TestRangeResultCompletionCallback cb;
  RangeResult result = cb.GetResult(
      entry->GetAvailableRange(0x20F0000, kSize * 2, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(kSize, result.available_len);
  EXPECT_EQ(0x20F0000, result.start);

  result = cb.GetResult(entry->GetAvailableRange(0, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);

  result = cb.GetResult(
      entry->GetAvailableRange(0x20F0000 - kSize, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);

  result = cb.GetResult(entry->GetAvailableRange(0, 0x2100000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(kSize, result.available_len);
  EXPECT_EQ(0x20F0000, result.start);

  // We should be able to Read based on the results of GetAvailableRange.
  net::TestCompletionCallback read_cb;
  result =
      cb.GetResult(entry->GetAvailableRange(0x2100000, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);
  int rv = entry->ReadSparseData(result.start, buf.get(), kSize,
                                 read_cb.callback());
  EXPECT_EQ(0, read_cb.GetResult(rv));

  result =
      cb.GetResult(entry->GetAvailableRange(0x20F2000, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0x2000, result.available_len);
  EXPECT_EQ(0x20F2000, result.start);
  EXPECT_EQ(0x2000, ReadSparseData(entry, result.start, buf.get(), kSize));

  // Make sure that we respect the |len| argument.
  result = cb.GetResult(
      entry->GetAvailableRange(0x20F0001 - kSize, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1, result.available_len);
  EXPECT_EQ(0x20F0000, result.start);

  // Use very small ranges. Write at offset 50.
  const int kTinyLen = 10;
  EXPECT_EQ(kTinyLen, WriteSparseData(entry, 50, buf.get(), kTinyLen));
  result = cb.GetResult(
      entry->GetAvailableRange(kTinyLen * 2, kTinyLen, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);
  EXPECT_EQ(kTinyLen * 2, result.start);

  // Get a huge range with the maximum boundary.
  result = cb.GetResult(entry->GetAvailableRange(
      0x2100000, std::numeric_limits<int32_t>::max(), cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);

  entry->Close();
}
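
// In short, GetAvailableRange reports where the first contiguous run of
// stored bytes begins within the queried window and how long it is; an
// available_len of 0 means the window contains no data at all.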
TEST_F(DiskCacheEntryTest, GetAvailableRange) {
  InitCache();
  GetAvailableRangeTest();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
  SetMemoryOnlyMode();
  InitCache();
  GetAvailableRangeTest();
}

TEST_F(DiskCacheEntryTest, GetAvailableRangeBlockFileDiscontinuous) {
  // crbug.com/791056 --- blockfile problem when there is a sub-KiB write
  // before a bunch of full 1KiB blocks, and a GetAvailableRange is issued to
  // which both are potentially relevant.
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  scoped_refptr<net::IOBuffer> buf_2k =
      base::MakeRefCounted<net::IOBuffer>(2 * 1024);
  CacheTestFillBuffer(buf_2k->data(), 2 * 1024, false);

  const int kSmallSize = 612;  // sub-1k
  scoped_refptr<net::IOBuffer> buf_small =
      base::MakeRefCounted<net::IOBuffer>(kSmallSize);
  CacheTestFillBuffer(buf_small->data(), kSmallSize, false);

  // Sets some bits for blocks representing 1K ranges [1024, 3072),
  // which will be relevant for the next GetAvailableRange call.
  EXPECT_EQ(2 * 1024, WriteSparseData(entry, /* offset = */ 1024, buf_2k.get(),
                                      /* size = */ 2 * 1024));

  // Now record a partial write from the start of the first kb.
  EXPECT_EQ(kSmallSize, WriteSparseData(entry, /* offset = */ 0,
                                        buf_small.get(), kSmallSize));

  // Try to query a range starting from that block 0.
  // The cache tracks: [0, 612) [1024, 3072).
  // The request is for: [812, 2059), so the response should be [1024, 2059),
  // which has length = 1035. Previously this returned a negative number for
  // rv.
  TestRangeResultCompletionCallback cb;
  RangeResult result =
      cb.GetResult(entry->GetAvailableRange(812, 1247, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1035, result.available_len);
  EXPECT_EQ(1024, result.start);

  // Now query [512, 1536). This matches both [512, 612) and [1024, 1536),
  // so this should return [512, 612).
  result = cb.GetResult(entry->GetAvailableRange(512, 1024, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(100, result.available_len);
  EXPECT_EQ(512, result.start);

  // Now query the next portion, [612, 1636). This should just produce
  // [1024, 1636).
  result = cb.GetResult(entry->GetAvailableRange(612, 1024, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(612, result.available_len);
  EXPECT_EQ(1024, result.start);

  // Do a continuous small write, this one at [3072, 3684).
  // This means the cache tracks [1024, 3072) via bitmaps and [3072, 3684)
  // as the last write.
  EXPECT_EQ(kSmallSize, WriteSparseData(entry, /* offset = */ 3072,
                                        buf_small.get(), kSmallSize));

  // Query [2048, 4096). Should get [2048, 3684).
  result = cb.GetResult(entry->GetAvailableRange(2048, 2048, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1636, result.available_len);
  EXPECT_EQ(2048, result.start);

  // Now write at [4096, 4708). Since only one sub-kb range is tracked, this
  // now tracks [1024, 3072) via bitmaps and [4096, 4708) as the last write.
  EXPECT_EQ(kSmallSize, WriteSparseData(entry, /* offset = */ 4096,
                                        buf_small.get(), kSmallSize));

  // Query [2048, 4096). Should get [2048, 3072).
  result = cb.GetResult(entry->GetAvailableRange(2048, 2048, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1024, result.available_len);
  EXPECT_EQ(2048, result.start);

  // Query 2K more after that: [3072, 5120). Should get [4096, 4708).
  result = cb.GetResult(entry->GetAvailableRange(3072, 2048, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(612, result.available_len);
  EXPECT_EQ(4096, result.start);

  // Also double-check that offsets within later children are correctly
  // computed.
  EXPECT_EQ(kSmallSize, WriteSparseData(entry, /* offset = */ 0x200400,
                                        buf_small.get(), kSmallSize));
  result = cb.GetResult(
      entry->GetAvailableRange(0x100000, 0x200000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(kSmallSize, result.available_len);
  EXPECT_EQ(0x200400, result.start);
  entry->Close();
}

// Tests that non-sequential writes that are not aligned with the minimum
// sparse data granularity (1024 bytes) do in fact result in dropped data.
TEST_F(DiskCacheEntryTest, SparseWriteDropped) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 180;
  scoped_refptr<net::IOBuffer> buf_1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buf_2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Do small writes (180 bytes) that get increasingly close to a 1024-byte
  // boundary. All data should be dropped until a boundary is crossed, at
  // which point the data after the boundary is saved (at least for a while).
  int offset = 1024 - 500;
  int rv = 0;
  net::TestCompletionCallback cb;
  TestRangeResultCompletionCallback range_cb;
  RangeResult result;
  for (int i = 0; i < 5; i++) {
    // Check the result of the last GetAvailableRange.
    EXPECT_EQ(0, result.available_len);

    rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback());
    EXPECT_EQ(kSize, cb.GetResult(rv));

    result = range_cb.GetResult(
        entry->GetAvailableRange(offset - 100, kSize, range_cb.callback()));
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(0, result.available_len);

    result = range_cb.GetResult(
        entry->GetAvailableRange(offset, kSize, range_cb.callback()));
    if (!result.available_len) {
      rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
      EXPECT_EQ(0, cb.GetResult(rv));
    }
    offset += 1024 * i + 100;
  }

  // The last write started 100 bytes below a boundary, so there should be 80
  // bytes after the boundary.
  EXPECT_EQ(80, result.available_len);
  EXPECT_EQ(1024 * 7, result.start);
  rv = entry->ReadSparseData(result.start, buf_2.get(), kSize, cb.callback());
  EXPECT_EQ(80, cb.GetResult(rv));
  EXPECT_EQ(0, memcmp(buf_1.get()->data() + 100, buf_2.get()->data(), 80));

  // And even that part is dropped when another write changes the offset.
  offset = result.start;
  rv = entry->WriteSparseData(0, buf_1.get(), kSize, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));

  result = range_cb.GetResult(
      entry->GetAvailableRange(offset, kSize, range_cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);
  entry->Close();
}

// Tests that small sequential writes are not dropped.
TEST_F(DiskCacheEntryTest, SparseSquentialWriteNotDropped) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 180;
  scoped_refptr<net::IOBuffer> buf_1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buf_2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Any starting offset is fine as long as it is 1024-byte aligned.
  int rv = 0;
  RangeResult result;
  net::TestCompletionCallback cb;
  TestRangeResultCompletionCallback range_cb;
  int64_t offset = 1024 * 11;
  for (; offset < 20000; offset += kSize) {
    rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback());
    EXPECT_EQ(kSize, cb.GetResult(rv));

    result = range_cb.GetResult(
        entry->GetAvailableRange(offset, kSize, range_cb.callback()));
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(kSize, result.available_len);
    EXPECT_EQ(offset, result.start);

    rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
    EXPECT_EQ(kSize, cb.GetResult(rv));
    EXPECT_EQ(0, memcmp(buf_1.get()->data(), buf_2.get()->data(), kSize));
  }

  entry->Close();
  FlushQueueForTest();

  // Re-verify the last write that was made.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  offset -= kSize;
  result = range_cb.GetResult(
      entry->GetAvailableRange(offset, kSize, range_cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(kSize, result.available_len);
  EXPECT_EQ(offset, result.start);

  rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));
  EXPECT_EQ(0, memcmp(buf_1.get()->data(), buf_2.get()->data(), kSize));
  entry->Close();
}

void DiskCacheEntryTest::CouldBeSparse() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 16 * 1024;
  scoped_refptr<net::IOBuffer> buf = base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));

  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  // Now verify a regular entry.
  key.assign("another key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_FALSE(entry->CouldBeSparse());
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 2, 0, buf.get(), kSize, false));
  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();

  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();
}

TEST_F(DiskCacheEntryTest, CouldBeSparse) {
  InitCache();
  CouldBeSparse();
}

TEST_F(DiskCacheEntryTest, MemoryCouldBeSparse) {
  SetMemoryOnlyMode();
  InitCache();
  CouldBeSparse();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedSparseIO) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf_1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buf_2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  // This loop writes back to back, starting from offset 0 and from offset
  // 9000.
  for (int i = 0; i < kSize; i += 1024) {
    scoped_refptr<net::WrappedIOBuffer> buf_3 =
        base::MakeRefCounted<net::WrappedIOBuffer>(buf_1->data() + i);
    VerifySparseIO(entry, i, buf_3.get(), 1024, buf_2.get());
    VerifySparseIO(entry, 9000 + i, buf_3.get(), 1024, buf_2.get());
  }

  // Make sure we have data written.
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize);

  // This tests a large write that spans 3 entries from a misaligned offset.
  VerifySparseIO(entry, 20481, buf_1.get(), 8192, buf_2.get());
  entry->Close();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedGetAvailableRange) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf = base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  disk_cache::Entry* entry;
  std::string key("the first key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  // Writes in the middle of an entry.
  EXPECT_EQ(1024, entry->WriteSparseData(0, buf.get(), 1024,
                                         net::CompletionOnceCallback()));
  EXPECT_EQ(1024, entry->WriteSparseData(5120, buf.get(), 1024,
                                         net::CompletionOnceCallback()));
  EXPECT_EQ(1024, entry->WriteSparseData(10000, buf.get(), 1024,
                                         net::CompletionOnceCallback()));

  // A write in the middle of an entry that spans 2 child entries.
  EXPECT_EQ(8192, entry->WriteSparseData(50000, buf.get(), 8192,
                                         net::CompletionOnceCallback()));

  TestRangeResultCompletionCallback cb;
  // Test that we stop at a discontinuous child at the second block.
  RangeResult result =
      cb.GetResult(entry->GetAvailableRange(0, 10000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1024, result.available_len);
  EXPECT_EQ(0, result.start);

  // Test that the number of bytes is reported correctly when we start from
  // the middle of a filled region.
  result = cb.GetResult(entry->GetAvailableRange(512, 10000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(512, result.available_len);
  EXPECT_EQ(512, result.start);

  // Test that we find the bytes in the child of the next block.
  result = cb.GetResult(entry->GetAvailableRange(1024, 10000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1024, result.available_len);
  EXPECT_EQ(5120, result.start);

  // Test that the desired length is respected. It starts within a filled
  // region.
  result = cb.GetResult(entry->GetAvailableRange(5500, 512, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(512, result.available_len);
  EXPECT_EQ(5500, result.start);

  // Test that the desired length is respected. It starts before a filled
  // region.
  result = cb.GetResult(entry->GetAvailableRange(5000, 620, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(500, result.available_len);
  EXPECT_EQ(5120, result.start);

  // Test that multiple blocks are scanned.
  result = cb.GetResult(entry->GetAvailableRange(40000, 20000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(8192, result.available_len);
  EXPECT_EQ(50000, result.start);
  entry->Close();
}

void DiskCacheEntryTest::UpdateSparseEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_THAT(CreateEntry(key, &entry1), IsOk());

  const int kSize = 2048;
  scoped_refptr<net::IOBuffer> buf_1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buf_2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0.
  VerifySparseIO(entry1, 0, buf_1.get(), kSize, buf_2.get());
  entry1->Close();

  // Write at offset 2048.
  ASSERT_THAT(OpenEntry(key, &entry1), IsOk());
  VerifySparseIO(entry1, 2048, buf_1.get(), kSize, buf_2.get());

  disk_cache::Entry* entry2;
  ASSERT_THAT(CreateEntry("the second key", &entry2), IsOk());

  entry1->Close();
  entry2->Close();
  FlushQueueForTest();
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(2, cache_->GetEntryCount());
  else
    EXPECT_EQ(3, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, UpdateSparseEntry) {
  InitCache();
  UpdateSparseEntry();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyUpdateSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  UpdateSparseEntry();
}

void DiskCacheEntryTest::DoomSparseEntry() {
  std::string key1("the first key");
  std::string key2("the second key");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_THAT(CreateEntry(key1, &entry1), IsOk());
  ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf = base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64_t offset = 1024;
  // Write to a bunch of ranges.
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize, WriteSparseData(entry1, offset, buf.get(), kSize));
    // Keep the second map under the default size.
    if (i < 9)
      EXPECT_EQ(kSize, WriteSparseData(entry2, offset, buf.get(), kSize));
    offset *= 4;
  }

  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(2, cache_->GetEntryCount());
  else
    EXPECT_EQ(15, cache_->GetEntryCount());

  // Doom the first entry while it's still open.
  entry1->Doom();
  entry1->Close();
  entry2->Close();

  // Doom the second entry after it's fully saved.
  EXPECT_THAT(DoomEntry(key2), IsOk());

  // Make sure we do all needed work. This may fail for entry2 if, between
  // Close and DoomEntry, the system decides to remove all traces of the file
  // from the system cache, so we don't see that there is pending IO.
  base::RunLoop().RunUntilIdle();

  if (memory_only_) {
    EXPECT_EQ(0, cache_->GetEntryCount());
  } else {
    if (5 == cache_->GetEntryCount()) {
      // Most likely we are waiting for the result of reading the sparse info
      // (it's always async on Posix so it is easy to miss). Unfortunately we
      // don't have any signal to watch for so we can only wait.
      base::PlatformThread::Sleep(base::Milliseconds(500));
      base::RunLoop().RunUntilIdle();
    }
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}

TEST_F(DiskCacheEntryTest, DoomSparseEntry) {
  UseCurrentThread();
  InitCache();
  DoomSparseEntry();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyDoomSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomSparseEntry();
}

// A TestCompletionCallback wrapper that deletes the cache from within the
// callback. The way TestCompletionCallback works means that all tasks (even
// new ones) are executed by the message loop before returning to the caller,
// so the only way to simulate a race is to execute what we want on the
// callback.
class SparseTestCompletionCallback : public net::TestCompletionCallback {
 public:
  explicit SparseTestCompletionCallback(
      std::unique_ptr<disk_cache::Backend> cache)
      : cache_(std::move(cache)) {}

  SparseTestCompletionCallback(const SparseTestCompletionCallback&) = delete;
  SparseTestCompletionCallback& operator=(const SparseTestCompletionCallback&) =
      delete;

 private:
  void SetResult(int result) override {
    cache_.reset();
    TestCompletionCallback::SetResult(result);
  }

  std::unique_ptr<disk_cache::Backend> cache_;
};
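
// Since SetResult() fires when the pending operation completes, resetting
// |cache_| there destroys the backend at that exact moment, which is the race
// the comment above describes.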

// Tests that we don't crash when the backend is deleted while we are busy
// deleting the sub-entries of a sparse entry.
TEST_F(DiskCacheEntryTest, DoomSparseEntry2) {
  UseCurrentThread();
  InitCache();
  std::string key("the key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf = base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64_t offset = 1024;
  // Write to a bunch of ranges.
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize, entry->WriteSparseData(offset, buf.get(), kSize,
                                            net::CompletionOnceCallback()));
    offset *= 4;
  }
  EXPECT_EQ(9, cache_->GetEntryCount());
  entry->Close();

  disk_cache::Backend* cache = cache_.get();
  SparseTestCompletionCallback cb(std::move(cache_));
  int rv = cache->DoomEntry(key, net::HIGHEST, cb.callback());
  EXPECT_THAT(rv, IsError(net::ERR_IO_PENDING));
  EXPECT_THAT(cb.WaitForResult(), IsOk());
}
  1972. void DiskCacheEntryTest::PartialSparseEntry() {
  1973. std::string key("the first key");
  1974. disk_cache::Entry* entry;
  1975. ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  1976. // We should be able to deal with IO that is not aligned to the block size
  1977. // of a sparse entry, at least to write a big range without leaving holes.
  1978. const int kSize = 4 * 1024;
  1979. const int kSmallSize = 128;
  1980. scoped_refptr<net::IOBuffer> buf1 =
  1981. base::MakeRefCounted<net::IOBuffer>(kSize);
  1982. CacheTestFillBuffer(buf1->data(), kSize, false);
  1983. // The first write is just to extend the entry. The third write occupies
  1984. // a 1KB block partially, it may not be written internally depending on the
  1985. // implementation.
  1986. EXPECT_EQ(kSize, WriteSparseData(entry, 20000, buf1.get(), kSize));
  1987. EXPECT_EQ(kSize, WriteSparseData(entry, 500, buf1.get(), kSize));
  1988. EXPECT_EQ(kSmallSize,
  1989. WriteSparseData(entry, 1080321, buf1.get(), kSmallSize));
  1990. entry->Close();
  1991. ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  1992. scoped_refptr<net::IOBuffer> buf2 =
  1993. base::MakeRefCounted<net::IOBuffer>(kSize);
  1994. memset(buf2->data(), 0, kSize);
  1995. EXPECT_EQ(0, ReadSparseData(entry, 8000, buf2.get(), kSize));
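  // The write at offset 500 covers [500, 500 + kSize); a read at kSize (4096)
  // therefore only sees the 500-byte tail of that range.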
  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize));

  // This read should not change anything.
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize));
  else
    EXPECT_EQ(0, ReadSparseData(entry, 24000, buf2.get(), kSize));

  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize));

  TestRangeResultCompletionCallback cb;
  RangeResult result;
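  // Blockfile tracks sparse data at 1 KiB block granularity, so it only
  // reports fully written blocks, while the memory-only and simple backends
  // track exact byte ranges; hence the diverging expectations below.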
  if (memory_only_ || simple_cache_mode_) {
    result = cb.GetResult(entry->GetAvailableRange(0, 600, cb.callback()));
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(100, result.available_len);
    EXPECT_EQ(500, result.start);
  } else {
    result = cb.GetResult(entry->GetAvailableRange(0, 2048, cb.callback()));
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(1024, result.available_len);
    EXPECT_EQ(1024, result.start);
  }

  result = cb.GetResult(entry->GetAvailableRange(kSize, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(500, result.available_len);
  EXPECT_EQ(kSize, result.start);
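  // The write at 20000 covers [20000, 24096); from 20480 that leaves 3616
  // bytes, which blockfile rounds down to three full 1 KiB blocks (3072).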
  result =
      cb.GetResult(entry->GetAvailableRange(20 * 1024, 10000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(3616, result.available_len);
  else
    EXPECT_EQ(3072, result.available_len);
  EXPECT_EQ(20 * 1024, result.start);

  // 1. Query before a filled 1KB block.
  // 2. Query within a filled 1KB block.
  // 3. Query beyond a filled 1KB block.
  if (memory_only_ || simple_cache_mode_) {
    result =
        cb.GetResult(entry->GetAvailableRange(19400, kSize, cb.callback()));
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(3496, result.available_len);
    EXPECT_EQ(20000, result.start);
  } else {
    result =
        cb.GetResult(entry->GetAvailableRange(19400, kSize, cb.callback()));
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(3016, result.available_len);
    EXPECT_EQ(20480, result.start);
  }
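  // From 3073 the write at 500 still has 4596 - 3073 = 1523 bytes left.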
  result = cb.GetResult(entry->GetAvailableRange(3073, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(1523, result.available_len);
  EXPECT_EQ(3073, result.start);

  result = cb.GetResult(entry->GetAvailableRange(4600, kSize, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);
  EXPECT_EQ(4600, result.start);

  // Now make another write and verify that there is no hole in between.
  EXPECT_EQ(kSize, WriteSparseData(entry, 500 + kSize, buf1.get(), kSize));
  result = cb.GetResult(entry->GetAvailableRange(1024, 10000, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(7 * 1024 + 500, result.available_len);
  EXPECT_EQ(1024, result.start);

  EXPECT_EQ(kSize, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));

  entry->Close();
}

TEST_F(DiskCacheEntryTest, PartialSparseEntry) {
  InitCache();
  PartialSparseEntry();
}

TEST_F(DiskCacheEntryTest, MemoryPartialSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  PartialSparseEntry();
}

void DiskCacheEntryTest::SparseInvalidArg() {
  std::string key("key");
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 2048;
  scoped_refptr<net::IOBuffer> buf = base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
            WriteSparseData(entry, -1, buf.get(), kSize));
  EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
            WriteSparseData(entry, 0, buf.get(), -1));
  EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
            ReadSparseData(entry, -1, buf.get(), kSize));
  EXPECT_EQ(net::ERR_INVALID_ARGUMENT, ReadSparseData(entry, 0, buf.get(), -1));

  int64_t start_out;
  EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
            GetAvailableRange(entry, -1, kSize, &start_out));
  EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
            GetAvailableRange(entry, 0, -1, &start_out));

  int rv = WriteSparseData(
      entry, std::numeric_limits<int64_t>::max() - kSize + 1, buf.get(), kSize);
  // Blockfile rejects anything over 64GiB with
  // net::ERR_CACHE_OPERATION_NOT_SUPPORTED, which is also OK here, as it's not
  // an overflow or something else nonsensical.
  EXPECT_TRUE(rv == net::ERR_INVALID_ARGUMENT ||
              rv == net::ERR_CACHE_OPERATION_NOT_SUPPORTED);
  entry->Close();
}

TEST_F(DiskCacheEntryTest, SparseInvalidArg) {
  InitCache();
  SparseInvalidArg();
}

TEST_F(DiskCacheEntryTest, MemoryOnlySparseInvalidArg) {
  SetMemoryOnlyMode();
  InitCache();
  SparseInvalidArg();
}

TEST_F(DiskCacheEntryTest, SimpleSparseInvalidArg) {
  SetSimpleCacheMode();
  InitCache();
  SparseInvalidArg();
}

void DiskCacheEntryTest::SparseClipEnd(int64_t max_index,
                                       bool expect_unsupported) {
  std::string key("key");
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 1024;
  scoped_refptr<net::IOBuffer> buf = base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  scoped_refptr<net::IOBuffer> read_buf =
      base::MakeRefCounted<net::IOBuffer>(kSize * 2);
  CacheTestFillBuffer(read_buf->data(), kSize * 2, false);
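  // Write the last kSize addressable bytes, then read twice that much so the
  // request runs past |max_index| and must be clipped.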
  const int64_t kOffset = max_index - kSize;
  int rv = WriteSparseData(entry, kOffset, buf.get(), kSize);
  EXPECT_EQ(
      rv, expect_unsupported ? net::ERR_CACHE_OPERATION_NOT_SUPPORTED : kSize);

  // Try to read past the end of the written range; the result should get
  // clipped (if sparse IO at this offset is supported at all).
  rv = ReadSparseData(entry, kOffset, read_buf.get(), kSize * 2);
  if (expect_unsupported) {
    EXPECT_EQ(rv, net::ERR_CACHE_OPERATION_NOT_SUPPORTED);
  } else {
    EXPECT_EQ(kSize, rv);
    EXPECT_EQ(0, memcmp(buf->data(), read_buf->data(), kSize));
  }

  TestRangeResultCompletionCallback cb;
  RangeResult result = cb.GetResult(
      entry->GetAvailableRange(kOffset - kSize, kSize * 3, cb.callback()));
  if (expect_unsupported) {
    // GetAvailableRange just returns nothing found, not an error.
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(result.available_len, 0);
  } else {
    EXPECT_EQ(net::OK, result.net_error);
    EXPECT_EQ(kSize, result.available_len);
    EXPECT_EQ(kOffset, result.start);
  }

  entry->Close();
}

TEST_F(DiskCacheEntryTest, SparseClipEnd) {
  InitCache();

  // Blockfile refuses to deal with sparse indices over 64GiB.
  SparseClipEnd(std::numeric_limits<int64_t>::max(),
                /*expect_unsupported=*/true);
}

TEST_F(DiskCacheEntryTest, SparseClipEnd2) {
  InitCache();

  const int64_t kLimit = 64ll * 1024 * 1024 * 1024;
  // Separate test for blockfile for indices right at the edge of its address
  // space limit. kLimit must match kMaxEndOffset in sparse_control.cc.
  SparseClipEnd(kLimit, /*expect_unsupported=*/false);

  // Test with things after kLimit, too, which isn't an issue for backends
  // supporting the entire 64-bit offset range.
  std::string key("key2");
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 1024;
  scoped_refptr<net::IOBuffer> buf = base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Trying to write past kLimit fails.
  int rv = WriteSparseData(entry, kLimit, buf.get(), kSize);
  EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);

  // Similarly for read.
  rv = ReadSparseData(entry, kLimit, buf.get(), kSize);
  EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);

  // GetAvailableRange just returns nothing.
  TestRangeResultCompletionCallback cb;
  RangeResult result =
      cb.GetResult(entry->GetAvailableRange(kLimit, kSize * 3, cb.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);
  entry->Close();
}

TEST_F(DiskCacheEntryTest, MemoryOnlySparseClipEnd) {
  SetMemoryOnlyMode();
  InitCache();
  SparseClipEnd(std::numeric_limits<int64_t>::max(),
                /*expect_unsupported=*/false);
}

TEST_F(DiskCacheEntryTest, SimpleSparseClipEnd) {
  SetSimpleCacheMode();
  InitCache();
  SparseClipEnd(std::numeric_limits<int64_t>::max(),
                /*expect_unsupported=*/false);
}

// Tests that corrupt sparse children are removed automatically.
TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf1->data(), kSize, false);

  const int k1Meg = 1024 * 1024;
  EXPECT_EQ(kSize, WriteSparseData(entry, 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  entry->Close();
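  // Expect the parent plus three children: each of the writes above lands in
  // a different 1 MiB child range.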
  EXPECT_EQ(4, cache_->GetEntryCount());

  std::unique_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  std::string child_keys[2];
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(entry != nullptr);
    // Writing to an entry will alter the LRU list and invalidate the iterator.
    if (entry->GetKey() != key && count < 2)
      child_keys[count++] = entry->GetKey();
    entry->Close();
  }
  for (const auto& child_key : child_keys) {
    ASSERT_THAT(OpenEntry(child_key, &entry), IsOk());
    // Overwrite the header's magic and signature.
    EXPECT_EQ(12, WriteData(entry, 2, 0, buf1.get(), 12, false));
    entry->Close();
  }

  EXPECT_EQ(4, cache_->GetEntryCount());
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  // Two children should be gone. One while reading and one while writing.
  EXPECT_EQ(0, ReadSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 16384, buf1.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, k1Meg + 8192, buf1.get(), kSize));

  // We never touched this one.
  EXPECT_EQ(kSize, ReadSparseData(entry, 8192, buf1.get(), kSize));
  entry->Close();

  // We re-created one of the corrupt children.
  EXPECT_EQ(3, cache_->GetEntryCount());
}
TEST_F(DiskCacheEntryTest, CancelSparseIO) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  const int kSize = 40 * 1024;
  scoped_refptr<net::IOBuffer> buf = base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  // This will open and write two "real" entries.
  net::TestCompletionCallback cb1, cb2, cb3, cb4;
  int rv = entry->WriteSparseData(
      1024 * 1024 - 4096, buf.get(), kSize, cb1.callback());
  EXPECT_THAT(rv, IsError(net::ERR_IO_PENDING));

  TestRangeResultCompletionCallback cb5;
  RangeResult result =
      cb5.GetResult(entry->GetAvailableRange(0, kSize, cb5.callback()));
  if (!cb1.have_result()) {
    // We may or may not have finished writing to the entry. If we have not,
    // we cannot start another operation at this time.
    EXPECT_THAT(rv, IsError(net::ERR_CACHE_OPERATION_NOT_SUPPORTED));
  }

  // We cancel the pending operation, and register multiple notifications.
  entry->CancelSparseIO();
  EXPECT_THAT(entry->ReadyForSparseIO(cb2.callback()),
              IsError(net::ERR_IO_PENDING));
  EXPECT_THAT(entry->ReadyForSparseIO(cb3.callback()),
              IsError(net::ERR_IO_PENDING));
  entry->CancelSparseIO();  // Should be a no-op at this point.
  EXPECT_THAT(entry->ReadyForSparseIO(cb4.callback()),
              IsError(net::ERR_IO_PENDING));

  if (!cb1.have_result()) {
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->ReadSparseData(result.start, buf.get(), kSize,
                                    net::CompletionOnceCallback()));
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->WriteSparseData(result.start, buf.get(), kSize,
                                     net::CompletionOnceCallback()));
  }

  // Now see if we receive all notifications. Note that we should not be able
  // to write everything (unless the timing of the system is really weird).
  rv = cb1.WaitForResult();
  EXPECT_TRUE(rv == 4096 || rv == kSize);
  EXPECT_THAT(cb2.WaitForResult(), IsOk());
  EXPECT_THAT(cb3.WaitForResult(), IsOk());
  EXPECT_THAT(cb4.WaitForResult(), IsOk());

  result = cb5.GetResult(
      entry->GetAvailableRange(result.start, kSize, cb5.callback()));
  EXPECT_EQ(net::OK, result.net_error);
  EXPECT_EQ(0, result.available_len);
  entry->Close();
}
// Tests that we perform sanity checks on an entry's key. Note that there are
// other tests that exercise sanity checks by using saved corrupt files.
TEST_F(DiskCacheEntryTest, KeySanityCheck) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  disk_cache::EntryStore* store = entry_impl->entry()->Data();

  // We have reserved space for a short key (one block); let's say that the key
  // takes more than one block, and remove the NULLs after the actual key.
  store->key_len = 800;
  memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
  entry_impl->entry()->set_modified();
  entry->Close();

  // We have a corrupt entry. Now reload it. We should NOT read beyond the
  // allocated buffer here.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
  DisableIntegrityCheck();
}

TEST_F(DiskCacheEntryTest, KeySanityCheck2) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  disk_cache::EntryStore* store = entry_impl->entry()->Data();

  // Fill in the rest of the inline key store with non-nulls. Unlike in
  // KeySanityCheck, this does not change the length to identify it as
  // stored under |long_key|.
  memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
  entry_impl->entry()->set_modified();
  entry->Close();

  // We have a corrupt entry. Now reload it. We should NOT read beyond the
  // allocated buffer here.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
  DisableIntegrityCheck();
}

TEST_F(DiskCacheEntryTest, KeySanityCheck3) {
  const size_t kVeryLong = 40 * 1024;
  UseCurrentThread();
  InitCache();
  std::string key(kVeryLong, 'a');
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());

  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  disk_cache::EntryStore* store = entry_impl->entry()->Data();

  // This test is only meaningful with long keys; we also want the key to live
  // in an external file, to avoid needing to duplicate offset math here.
  disk_cache::Addr key_addr(store->long_key);
  ASSERT_TRUE(key_addr.is_initialized());
  ASSERT_TRUE(key_addr.is_separate_file());

  // Close the entry before messing up its files.
  entry->Close();

  // Mess up the terminating null in the external key file.
  auto key_file =
      base::MakeRefCounted<disk_cache::File>(true /* want sync ops */);
  ASSERT_TRUE(key_file->Init(cache_impl_->GetFileName(key_addr)));
  ASSERT_TRUE(key_file->Write("b", 1u, kVeryLong));
  key_file = nullptr;

  // This case gets graceful recovery.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());

  // Make sure the key object isn't messed up.
  EXPECT_EQ(kVeryLong, strlen(entry->GetKey().data()));
  entry->Close();
}
TEST_F(DiskCacheEntryTest, SimpleCacheInternalAsyncIO) {
  SetSimpleCacheMode();
  InitCache();
  InternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, SimpleCacheExternalAsyncIO) {
  SetSimpleCacheMode();
  InitCache();
  ExternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, SimpleCacheReleaseBuffer) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    ReleaseBuffer(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheStreamAccess) {
  SetSimpleCacheMode();
  InitCache();
  StreamAccess();
}

TEST_F(DiskCacheEntryTest, SimpleCacheGetKey) {
  SetSimpleCacheMode();
  InitCache();
  GetKey();
}

TEST_F(DiskCacheEntryTest, SimpleCacheGetTimes) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    GetTimes(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheGrowData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    GrowData(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheTruncateData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    TruncateData(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheZeroLengthIO) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    ZeroLengthIO(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheSizeAtCreate) {
  SetSimpleCacheMode();
  InitCache();
  SizeAtCreate();
}

TEST_F(DiskCacheEntryTest, SimpleCacheReuseExternalEntry) {
  SetSimpleCacheMode();
  SetMaxSize(200 * 1024);
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    ReuseEntry(20 * 1024, i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheReuseInternalEntry) {
  SetSimpleCacheMode();
  SetMaxSize(100 * 1024);
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    ReuseEntry(10 * 1024, i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheGiantEntry) {
  const int kBufSize = 32 * 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufSize);
  CacheTestFillBuffer(buffer->data(), kBufSize, false);

  // Make sure the simple cache can write a 5 MiB entry even with the 20 MiB
  // cache size that Android WebView used at the time this test was written.
  SetSimpleCacheMode();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
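  // (The backend derives a per-entry size cap from the cache size, but the
  // cap does not drop below 5 MiB, which is why exactly 5 MiB succeeds and
  // one byte more fails below.)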
  {
    std::string key1("the first key");
    disk_cache::Entry* entry1 = nullptr;
    ASSERT_THAT(CreateEntry(key1, &entry1), IsOk());
    const int kSize1 = 5 * 1024 * 1024;
    EXPECT_EQ(kBufSize, WriteData(entry1, 1 /* stream */, kSize1 - kBufSize,
                                  buffer.get(), kBufSize, true /* truncate */));
    entry1->Close();
  }

  // ... but not bigger than that.
  {
    std::string key2("the second key");
    disk_cache::Entry* entry2 = nullptr;
    ASSERT_THAT(CreateEntry(key2, &entry2), IsOk());
    const int kSize2 = 5 * 1024 * 1024 + 1;
    EXPECT_EQ(net::ERR_FAILED,
              WriteData(entry2, 1 /* stream */, kSize2 - kBufSize, buffer.get(),
                        kBufSize, true /* truncate */));
    entry2->Close();
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheSizeChanges) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    SizeChanges(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheInvalidData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    InvalidData(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheReadWriteDestroyBuffer) {
  // Proving that the test works well with optimistic operations enabled is
  // subtle; instead, run only in APP_CACHE mode to disable optimistic
  // operations. Stream 0 always uses optimistic operations, so the test is
  // not run on stream 0.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  for (int i = 1; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    ReadWriteDestroyBuffer(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomNormalEntry();
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntryNextToOpenEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomEntryNextToOpenEntry();
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomedEntry) {
  SetSimpleCacheMode();
  InitCache();
  // Stream 2 is excluded because the implementation does not support writing
  // to it on a doomed entry, if it was previously lazily omitted.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount - 1; ++i) {
    EXPECT_THAT(DoomAllEntries(), IsOk());
    DoomedEntry(i);
  }
}

// Creates an entry with a corrupted byte at the end of stream 1.
// Requires SimpleCacheMode.
bool DiskCacheEntryTest::SimpleCacheMakeBadChecksumEntry(const std::string& key,
                                                         int data_size) {
  disk_cache::Entry* entry = nullptr;

  if (CreateEntry(key, &entry) != net::OK || !entry) {
    LOG(ERROR) << "Could not create entry";
    return false;
  }

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(data_size);
  memset(buffer->data(), 'A', data_size);

  EXPECT_EQ(data_size, WriteData(entry, 1, 0, buffer.get(), data_size, false));
  entry->Close();
  entry = nullptr;

  // Corrupt the last byte of the data.
  base::FilePath entry_file0_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  base::File entry_file0(entry_file0_path,
                         base::File::FLAG_WRITE | base::File::FLAG_OPEN);
  if (!entry_file0.IsValid())
    return false;
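  // In file 0, stream 1's payload sits immediately after the file header and
  // the key, so this offset lands at the tail of that stream's data.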
  int64_t file_offset =
      sizeof(disk_cache::SimpleFileHeader) + key.size() + data_size - 2;
  EXPECT_EQ(1, entry_file0.Write(file_offset, "X", 1));
  return true;
}

TEST_F(DiskCacheEntryTest, SimpleCacheBadChecksum) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  const int kLargeSize = 50000;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, kLargeSize));

  disk_cache::Entry* entry = nullptr;

  // Open the entry. The data is too large for the checksum mismatch to be
  // spotted this early.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ScopedEntryPtr entry_closer(entry);

  EXPECT_GE(kLargeSize, entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> read_buffer =
      base::MakeRefCounted<net::IOBuffer>(kLargeSize);
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kLargeSize));
}

// Tests that an entry that has had an IO error occur can still be Doomed().
TEST_F(DiskCacheEntryTest, SimpleCacheErrorThenDoom) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  const int kLargeSize = 50000;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, kLargeSize));

  disk_cache::Entry* entry = nullptr;

  // Open the entry, forcing an IO error.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ScopedEntryPtr entry_closer(entry);

  EXPECT_GE(kLargeSize, entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> read_buffer =
      base::MakeRefCounted<net::IOBuffer>(kLargeSize);
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kLargeSize));
  entry->Doom();  // Should not crash.
}

TEST_F(DiskCacheEntryTest, SimpleCacheCreateAfterDiskLayerDoom) {
  // Code coverage for what happens when a queued create runs after failure
  // was noticed at the SimpleSynchronousEntry layer.
  SetSimpleCacheMode();
  // Disable optimistic ops so we can block on CreateEntry and start
  // WriteData off with an empty op queue.
  SetCacheType(net::APP_CACHE);
  InitCache();

  const char key[] = "the key";
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  ASSERT_TRUE(entry != nullptr);

  // Make an empty _1 file, to cause a stream 2 write to fail.
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 1));
  base::File entry_file1(entry_file1_path,
                         base::File::FLAG_WRITE | base::File::FLAG_CREATE);
  ASSERT_TRUE(entry_file1.IsValid());

  entry->WriteData(2, 0, buffer1.get(), kSize1, net::CompletionOnceCallback(),
                   /* truncate= */ true);
  entry->Close();

  // At this point we have put WriteData & Close on the queue, and WriteData
  // started, but we haven't given the event loop control, so the failure
  // hasn't been reported and handled here, and the entry is still active
  // for the key. Queue up another create for the same key, and run through
  // the events.
  disk_cache::Entry* entry2 = nullptr;
  ASSERT_EQ(net::ERR_FAILED, CreateEntry(key, &entry2));
  ASSERT_TRUE(entry2 == nullptr);

  EXPECT_EQ(0, cache_->GetEntryCount());

  // Should be able to create properly next time, though.
  disk_cache::Entry* entry3 = nullptr;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry3));
  ASSERT_TRUE(entry3 != nullptr);
  entry3->Close();
}

TEST_F(DiskCacheEntryTest, SimpleCacheQueuedOpenOnDoomedEntry) {
  // This tests the following sequence of ops:
  // A = Create(K);
  // Close(A);
  // B = Open(K);
  // Doom(K);
  // Close(B);
  //
  // ... where the execution of the Open sits on the queue all the way till
  // Doom. This now succeeds, as the doom is merely queued at time of Open,
  // rather than completed.
  SetSimpleCacheMode();
  // Disable optimistic ops so we can block on CreateEntry and start
  // WriteData off with an empty op queue.
  SetCacheType(net::APP_CACHE);
  InitCache();

  const char key[] = "the key";
  disk_cache::Entry* entry = nullptr;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));  // Event loop!
  ASSERT_TRUE(entry != nullptr);
  entry->Close();

  // Done via cache_ -> no event loop.
  TestEntryResultCompletionCallback cb;
  EntryResult result = cache_->OpenEntry(key, net::HIGHEST, cb.callback());
  ASSERT_EQ(net::ERR_IO_PENDING, result.net_error());

  net::TestCompletionCallback cb2;
  cache_->DoomEntry(key, net::HIGHEST, cb2.callback());

  // Now event loop.
  result = cb.WaitForResult();
  EXPECT_EQ(net::OK, result.net_error());
  result.ReleaseEntry()->Close();
  EXPECT_EQ(net::OK, cb2.WaitForResult());
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomErrorRace) {
  // Code coverage for a doom racing with a doom induced by a failure.
  SetSimpleCacheMode();
  // Disable optimistic ops so we can block on CreateEntry and start
  // WriteData off with an empty op queue.
  SetCacheType(net::APP_CACHE);
  InitCache();

  const char kKey[] = "the first key";
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_EQ(net::OK, CreateEntry(kKey, &entry));
  ASSERT_TRUE(entry != nullptr);

  // Make an empty _1 file, to cause a stream 2 write to fail.
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(kKey, 1));
  base::File entry_file1(entry_file1_path,
                         base::File::FLAG_WRITE | base::File::FLAG_CREATE);
  ASSERT_TRUE(entry_file1.IsValid());

  entry->WriteData(2, 0, buffer1.get(), kSize1, net::CompletionOnceCallback(),
                   /* truncate= */ true);

  net::TestCompletionCallback cb;
  cache_->DoomEntry(kKey, net::HIGHEST, cb.callback());
  entry->Close();
  EXPECT_EQ(0, cb.WaitForResult());
}
bool TruncatePath(const base::FilePath& file_path, int64_t length) {
  base::File file(file_path, base::File::FLAG_WRITE | base::File::FLAG_OPEN);
  if (!file.IsValid())
    return false;
  return file.SetLength(length);
}

TEST_F(DiskCacheEntryTest, SimpleCacheNoEOF) {
  SetSimpleCacheMode();
  InitCache();

  const std::string key("the first key");
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  disk_cache::Entry* null = nullptr;
  EXPECT_NE(null, entry);
  entry->Close();
  entry = nullptr;

  // Force the entry to flush to disk, so subsequent platform file operations
  // succeed.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  entry->Close();
  entry = nullptr;

  // Truncate the file such that the length isn't sufficient to have an EOF
  // record.
  int kTruncationBytes = -static_cast<int>(sizeof(disk_cache::SimpleFileEOF));
  const base::FilePath entry_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  const int64_t invalid_size = disk_cache::simple_util::GetFileSizeFromDataSize(
      key.size(), kTruncationBytes);
  EXPECT_TRUE(TruncatePath(entry_path, invalid_size));
  EXPECT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));
  DisableIntegrityCheck();
}

TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsBasic) {
  // Test sequence:
  // Create, Write, Read, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* const null_entry = nullptr;
  disk_cache::Entry* entry = nullptr;
  EXPECT_THAT(CreateEntry("my key", &entry), IsOk());
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  EXPECT_EQ(
      write_buffer->size(),
      WriteData(entry, 1, 0, write_buffer.get(), write_buffer->size(), false));

  scoped_refptr<net::IOBufferWithSize> read_buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
  EXPECT_EQ(read_buffer->size(),
            ReadData(entry, 1, 0, read_buffer.get(), read_buffer->size()));
}
TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsDontBlock) {
  // Test sequence:
  // Create, Write, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* const null_entry = nullptr;
  MessageLoopHelper helper;
  CallbackTest create_callback(&helper, false);

  int expected_callback_runs = 0;
  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);

  disk_cache::Entry* entry = nullptr;
  EXPECT_THAT(CreateEntry("my key", &entry), IsOk());
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  CallbackTest write_callback(&helper, false);
  int ret = entry->WriteData(
      1, 0, write_buffer.get(), write_buffer->size(),
      base::BindOnce(&CallbackTest::Run, base::Unretained(&write_callback)),
      false);
  ASSERT_THAT(ret, IsError(net::ERR_IO_PENDING));
  helper.WaitUntilCacheIoFinished(++expected_callback_runs);
}

TEST_F(DiskCacheEntryTest,
       SimpleCacheNonOptimisticOperationsBasicsWithoutWaiting) {
  // Test sequence:
  // Create, Write, Read, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* const null_entry = nullptr;
  MessageLoopHelper helper;

  disk_cache::Entry* entry = nullptr;
  // Note that |entry| is only set once CreateEntry() completes, which is why
  // we have to wait (i.e. use the helper CreateEntry() function).
  EXPECT_THAT(CreateEntry("my key", &entry), IsOk());
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  CallbackTest write_callback(&helper, false);
  int ret = entry->WriteData(
      1, 0, write_buffer.get(), write_buffer->size(),
      base::BindOnce(&CallbackTest::Run, base::Unretained(&write_callback)),
      false);
  EXPECT_THAT(ret, IsError(net::ERR_IO_PENDING));
  int expected_callback_runs = 1;

  scoped_refptr<net::IOBufferWithSize> read_buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(kBufferSize);
  CallbackTest read_callback(&helper, false);
  ret = entry->ReadData(
      1, 0, read_buffer.get(), read_buffer->size(),
      base::BindOnce(&CallbackTest::Run, base::Unretained(&read_callback)));
  EXPECT_THAT(ret, IsError(net::ERR_IO_PENDING));
  ++expected_callback_runs;

  helper.WaitUntilCacheIoFinished(expected_callback_runs);
  ASSERT_EQ(read_buffer->size(), write_buffer->size());
  EXPECT_EQ(
      0,
      memcmp(read_buffer->data(), write_buffer->data(), read_buffer->size()));
}
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic) {
  // Test sequence:
  // Create, Write, Read, Write, Read, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "the first key";

  MessageLoopHelper helper;
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);

  int expected = 0;
  const int kSize1 = 10;
  const int kSize2 = 20;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  scoped_refptr<net::IOBuffer> buffer1_read =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize2);
  scoped_refptr<net::IOBuffer> buffer2_read =
      base::MakeRefCounted<net::IOBuffer>(kSize2);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);

  // Create is optimistic, must return OK.
  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST,
                          base::BindOnce(&CallbackTest::RunWithEntry,
                                         base::Unretained(&callback1)));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ASSERT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);

  // This write may or may not be optimistic (it depends on whether the
  // previous optimistic create already finished by the time we call the
  // write here).
  int ret = entry->WriteData(
      1, 0, buffer1.get(), kSize1,
      base::BindOnce(&CallbackTest::Run, base::Unretained(&callback2)), false);
  EXPECT_TRUE(kSize1 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  // This Read must not be optimistic, since we don't support that yet.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer1_read.get(), kSize1,
                            base::BindOnce(&CallbackTest::Run,
                                           base::Unretained(&callback3))));
  expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));

  // At this point, after waiting, the pending operations queue on the entry
  // should be empty, so the next Write operation must run as optimistic.
  EXPECT_EQ(kSize2,
            entry->WriteData(1, 0, buffer2.get(), kSize2,
                             base::BindOnce(&CallbackTest::Run,
                                            base::Unretained(&callback4)),
                             false));

  // Let's do another read so we block until both the write and the read
  // operation finish, and we can then test for HasOneRef() below.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer2_read.get(), kSize2,
                            base::BindOnce(&CallbackTest::Run,
                                           base::Unretained(&callback5))));
  expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer2_read->data(), kSize2));

  // Check that we are not leaking.
  EXPECT_NE(entry, null);
  EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}

TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic2) {
  // Test sequence:
  // Create, Open, Close, Close.
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  MessageLoopHelper helper;
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);

  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST,
                          base::BindOnce(&CallbackTest::RunWithEntry,
                                         base::Unretained(&callback1)));
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ASSERT_NE(nullptr, entry);
  ScopedEntryPtr entry_closer(entry);

  EntryResult result2 =
      cache_->OpenEntry(key, net::HIGHEST,
                        base::BindOnce(&CallbackTest::RunWithEntry,
                                       base::Unretained(&callback2)));
  ASSERT_EQ(net::ERR_IO_PENDING, result2.net_error());
  ASSERT_TRUE(helper.WaitUntilCacheIoFinished(1));

  result2 = callback2.ReleaseLastEntryResult();
  EXPECT_EQ(net::OK, result2.net_error());
  disk_cache::Entry* entry2 = result2.ReleaseEntry();
  EXPECT_NE(nullptr, entry2);
  EXPECT_EQ(entry, entry2);

  // We have to call Close() twice, since we called Create and Open above
  // (the other close comes from |entry_closer|).
  entry->Close();

  // Check that we are not leaking.
  EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic3) {
  // Test sequence:
  // Create, Close, Open, Close.
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ASSERT_NE(nullptr, entry);
  entry->Close();

  TestEntryResultCompletionCallback cb;
  EntryResult result2 = cache_->OpenEntry(key, net::HIGHEST, cb.callback());
  ASSERT_EQ(net::ERR_IO_PENDING, result2.net_error());
  result2 = cb.WaitForResult();
  ASSERT_THAT(result2.net_error(), IsOk());
  disk_cache::Entry* entry2 = result2.ReleaseEntry();
  ScopedEntryPtr entry_closer(entry2);

  EXPECT_NE(nullptr, entry2);
  EXPECT_EQ(entry, entry2);

  // Check that we are not leaking.
  EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
}

TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic4) {
  // Test sequence:
  // Create, Close, Write, Open, Open, Close, Write, Read, Close.
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);

  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ASSERT_NE(nullptr, entry);
  entry->Close();

  // Let's do a Write so we block until both the Close and the Write
  // operation finish. The Write must fail, since we are writing to a closed
  // entry.
  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_THAT(cb.GetResult(net::ERR_IO_PENDING), IsError(net::ERR_FAILED));

  // Finish running the pending tasks so that we fully complete the close
  // operation and destroy the entry object.
  base::RunLoop().RunUntilIdle();

  // At this point the |entry| must have been destroyed, and called
  // RemoveSelfFromBackend().
  TestEntryResultCompletionCallback cb2;
  EntryResult result2 = cache_->OpenEntry(key, net::HIGHEST, cb2.callback());
  ASSERT_EQ(net::ERR_IO_PENDING, result2.net_error());
  result2 = cb2.WaitForResult();
  ASSERT_THAT(result2.net_error(), IsOk());
  disk_cache::Entry* entry2 = result2.ReleaseEntry();
  EXPECT_NE(nullptr, entry2);

  EntryResult result3 = cache_->OpenEntry(key, net::HIGHEST, cb2.callback());
  ASSERT_EQ(net::ERR_IO_PENDING, result3.net_error());
  result3 = cb2.WaitForResult();
  ASSERT_THAT(result3.net_error(), IsOk());
  disk_cache::Entry* entry3 = result3.ReleaseEntry();
  EXPECT_NE(nullptr, entry3);
  EXPECT_EQ(entry2, entry3);
  entry3->Close();

  // The previous Close doesn't actually close the entry, since we opened it
  // twice, so the next Write operation must succeed, and it must be able to
  // run optimistically, since there is no operation running on this entry.
  EXPECT_EQ(kSize1, entry2->WriteData(1, 0, buffer1.get(), kSize1,
                                      net::CompletionOnceCallback(), false));

  // Let's do another read so we block until both the write and the read
  // operation finish, and we can then test for HasOneRef() below.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry2->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  // Check that we are not leaking.
  EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
  entry2->Close();
}
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic5) {
  // Test sequence:
  // Create, Doom, Write, Read, Close.
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);

  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ASSERT_NE(nullptr, entry);
  ScopedEntryPtr entry_closer(entry);
  entry->Doom();
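  // I/O against the doomed (but still open) entry should keep working on its
  // orphaned data.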
  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  // Check that we are not leaking.
  EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}

TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic6) {
  // Test sequence:
  // Create, Write, Doom, Doom, Read, Doom, Close.
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  scoped_refptr<net::IOBuffer> buffer1_read =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);

  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_NE(nullptr, entry);
  ScopedEntryPtr entry_closer(entry);

  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  entry->Doom();
  entry->Doom();

  // This Read must not be optimistic, since we don't support that yet.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer1_read.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
  entry->Doom();
}
// Confirm that IO buffers are not referenced by the Simple Cache after a write
// completes.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticWriteReleases) {
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  // First, an optimistic create.
  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ASSERT_TRUE(entry);
  ScopedEntryPtr entry_closer(entry);

  const int kWriteSize = 512;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kWriteSize);
  EXPECT_TRUE(buffer1->HasOneRef());
  CacheTestFillBuffer(buffer1->data(), kWriteSize, false);

  // An optimistic write happens only when there is an empty queue of pending
  // operations. To ensure the queue is empty, we issue a write and wait until
  // it completes.
  EXPECT_EQ(kWriteSize,
            WriteData(entry, 1, 0, buffer1.get(), kWriteSize, false));
  EXPECT_TRUE(buffer1->HasOneRef());

  // Finally, we should perform an optimistic write and confirm that all
  // references to the IO buffer have been released.
  EXPECT_EQ(kWriteSize, entry->WriteData(1, 0, buffer1.get(), kWriteSize,
                                         net::CompletionOnceCallback(), false));
  EXPECT_TRUE(buffer1->HasOneRef());
}

TEST_F(DiskCacheEntryTest, SimpleCacheCreateDoomRace) {
  // Test sequence:
  // Create, Doom, Write, Close, check that the files are no longer on disk.
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize1);
  CacheTestFillBuffer(buffer1->data(), kSize1, false);

  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  EXPECT_NE(nullptr, entry);

  EXPECT_THAT(cache_->DoomEntry(key, net::HIGHEST, cb.callback()),
              IsError(net::ERR_IO_PENDING));
  EXPECT_THAT(cb.GetResult(net::ERR_IO_PENDING), IsOk());
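  // The entry is already doomed, but the write still succeeds against its
  // orphaned files.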
  EXPECT_EQ(
      kSize1,
      entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));
  entry->Close();

  // Finish running the pending tasks so that we fully complete the close
  // operation and destroy the entry object.
  base::RunLoop().RunUntilIdle();

  for (int i = 0; i < disk_cache::kSimpleEntryNormalFileCount; ++i) {
    base::FilePath entry_file_path = cache_path_.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i));
    base::File::Info info;
    EXPECT_FALSE(base::GetFileInfo(entry_file_path, &info));
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateRace) {
  // This test runs as APP_CACHE to make operations more synchronous. Test
  // sequence:
  // Create, Doom, Create.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";

  TestEntryResultCompletionCallback create_callback;
  EntryResult result1 = create_callback.GetResult(
      cache_->CreateEntry(key, net::HIGHEST, create_callback.callback()));
  ASSERT_EQ(net::OK, result1.net_error());
  disk_cache::Entry* entry1 = result1.ReleaseEntry();
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(nullptr, entry1);

  net::TestCompletionCallback doom_callback;
  EXPECT_EQ(net::ERR_IO_PENDING,
            cache_->DoomEntry(key, net::HIGHEST, doom_callback.callback()));

  EntryResult result2 = create_callback.GetResult(
      cache_->CreateEntry(key, net::HIGHEST, create_callback.callback()));
  ASSERT_EQ(net::OK, result2.net_error());
  disk_cache::Entry* entry2 = result2.ReleaseEntry();
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_THAT(doom_callback.GetResult(net::ERR_IO_PENDING), IsOk());
}
  3155. TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateOptimistic) {
  3156. // Test that we optimize the doom -> create sequence when optimistic ops
  3157. // are on.
  3158. SetSimpleCacheMode();
  3159. InitCache();
  3160. const char kKey[] = "the key";
  3161. // Create entry and initiate its Doom.
  3162. disk_cache::Entry* entry1 = nullptr;
  3163. ASSERT_THAT(CreateEntry(kKey, &entry1), IsOk());
  3164. ASSERT_TRUE(entry1 != nullptr);
  3165. net::TestCompletionCallback doom_callback;
  3166. cache_->DoomEntry(kKey, net::HIGHEST, doom_callback.callback());
  3167. TestEntryResultCompletionCallback create_callback;
  3168. // Open entry2, with same key. With optimistic ops, this should succeed
  3169. // immediately, hence us using cache_->CreateEntry directly rather than using
  3170. // the DiskCacheTestWithCache::CreateEntry wrapper which blocks when needed.
  3171. EntryResult result2 =
  3172. cache_->CreateEntry(kKey, net::HIGHEST, create_callback.callback());
  3173. ASSERT_EQ(net::OK, result2.net_error());
  3174. disk_cache::Entry* entry2 = result2.ReleaseEntry();
  3175. ASSERT_NE(nullptr, entry2);
  3176. // Do some I/O to make sure it's alive.
  3177. const int kSize = 2048;
  3178. scoped_refptr<net::IOBuffer> buf_1 =
  3179. base::MakeRefCounted<net::IOBuffer>(kSize);
  3180. scoped_refptr<net::IOBuffer> buf_2 =
  3181. base::MakeRefCounted<net::IOBuffer>(kSize);
  3182. CacheTestFillBuffer(buf_1->data(), kSize, false);
  3183. EXPECT_EQ(kSize, WriteData(entry2, /* index = */ 1, /* offset = */ 0,
  3184. buf_1.get(), kSize, /* truncate = */ false));
  3185. EXPECT_EQ(kSize, ReadData(entry2, /* index = */ 1, /* offset = */ 0,
  3186. buf_2.get(), kSize));
  3187. doom_callback.WaitForResult();
  3188. entry1->Close();
  3189. entry2->Close();
  3190. }

TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateOptimisticMassDoom) {
  // Test that shows that a certain DCHECK in mass doom code had to be removed
  // once optimistic doom -> create was added.
  SetSimpleCacheMode();
  InitCache();
  const char kKey[] = "the key";
  // Create entry and initiate its Doom.
  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry1), IsOk());
  ASSERT_TRUE(entry1 != nullptr);
  net::TestCompletionCallback doom_callback;
  cache_->DoomEntry(kKey, net::HIGHEST, doom_callback.callback());
  TestEntryResultCompletionCallback create_callback;
  // Create entry2 with the same key. With optimistic ops, this should succeed
  // immediately, hence we use cache_->CreateEntry directly rather than the
  // DiskCacheTestWithCache::CreateEntry wrapper, which blocks when needed.
  EntryResult result =
      cache_->CreateEntry(kKey, net::HIGHEST, create_callback.callback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry2 = result.ReleaseEntry();
  ASSERT_NE(nullptr, entry2);
  net::TestCompletionCallback doomall_callback;
  // This is the call whose implementation used to contain the no-longer-valid
  // DCHECK.
  cache_->DoomAllEntries(doomall_callback.callback());
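  // (The mass-doom code used to DCHECK that it never encountered an entry
  // that was already being doomed; the optimistic doom -> create sequence
  // above makes exactly that state reachable, so the DCHECK had to go.)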
  doom_callback.WaitForResult();
  doomall_callback.WaitForResult();
  entry1->Close();
  entry2->Close();
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomOpenOptimistic) {
  // Test that we optimize the doom -> open sequence when optimistic ops
  // are on.
  SetSimpleCacheMode();
  InitCache();
  const char kKey[] = "the key";
  // Create the entry, close it, and initiate its Doom.
  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry1), IsOk());
  ASSERT_TRUE(entry1 != nullptr);
  entry1->Close();
  net::TestCompletionCallback doom_callback;
  cache_->DoomEntry(kKey, net::HIGHEST, doom_callback.callback());
  // Try to open the entry. This should detect a miss immediately, since it's
  // the only thing queued after a doom.
  EntryResult result2 =
      cache_->OpenEntry(kKey, net::HIGHEST, EntryResultCallback());
  EXPECT_EQ(net::ERR_FAILED, result2.net_error());
  EXPECT_EQ(nullptr, result2.ReleaseEntry());
  doom_callback.WaitForResult();
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomDoom) {
  // Test sequence:
  // Create, Doom, Create, Doom (1st entry), Open.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "the first key";
  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry1), IsOk());
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);
  EXPECT_THAT(DoomEntry(key), IsOk());
  disk_cache::Entry* entry2 = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry2), IsOk());
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);
  // Redundantly dooming entry1 should not delete entry2.
  disk_cache::SimpleEntryImpl* simple_entry1 =
      static_cast<disk_cache::SimpleEntryImpl*>(entry1);
  net::TestCompletionCallback cb;
  EXPECT_EQ(net::OK, cb.GetResult(simple_entry1->DoomEntry(cb.callback())));
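  // (Entry::Doom() is fire-and-forget; the cast to SimpleEntryImpl above
  // exposes the callback-taking DoomEntry(), letting the test wait for the
  // redundant doom to finish before opening.)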
  disk_cache::Entry* entry3 = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry3), IsOk());
  ScopedEntryPtr entry3_closer(entry3);
  EXPECT_NE(null, entry3);
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateDoom) {
  // Test sequence:
  // Create, Doom, Create, Doom.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "the first key";
  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry1), IsOk());
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);
  entry1->Doom();
  disk_cache::Entry* entry2 = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry2), IsOk());
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);
  entry2->Doom();
  // This test passes if it doesn't crash.
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomCloseCreateCloseOpen) {
  // Test sequence: Create, Doom, Close, Create, Close, Open.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "this is a key";
  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry1), IsOk());
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);
  entry1->Doom();
  entry1_closer.reset();
  entry1 = nullptr;
  disk_cache::Entry* entry2 = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry2), IsOk());
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);
  entry2_closer.reset();
  entry2 = nullptr;
  disk_cache::Entry* entry3 = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry3), IsOk());
  ScopedEntryPtr entry3_closer(entry3);
  EXPECT_NE(null, entry3);
}

// Checks that an optimistic Create would fail later on a racing Open.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticCreateFailsOnOpen) {
  SetSimpleCacheMode();
  InitCache();
  // Create a corrupt file in place of a future entry. Optimistic create should
  // initially succeed, but realize later that creation failed.
  const std::string key = "the key";
  disk_cache::Entry* entry = nullptr;
  disk_cache::Entry* entry2 = nullptr;
  EXPECT_TRUE(
      disk_cache::simple_util::CreateCorruptFileForTests(key, cache_path_));
  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  EXPECT_THAT(result.net_error(), IsOk());
  entry = result.ReleaseEntry();
  ASSERT_TRUE(entry);
  ScopedEntryPtr entry_closer(entry);
  ASSERT_NE(net::OK, OpenEntry(key, &entry2));
  // Check that we are not leaking.
  EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
  DisableIntegrityCheck();
}

// Tests that old entries are evicted while new entries remain in the index.
// This test relies on non-mandatory properties of the Simple Cache backend:
// LRU eviction, specific values of the high and low watermarks, etc.
// When changing the eviction algorithm, the test will have to be re-engineered.
TEST_F(DiskCacheEntryTest, SimpleCacheEvictOldEntries) {
  const int kMaxSize = 200 * 1024;
  const int kWriteSize = kMaxSize / 10;
  const int kNumExtraEntries = 12;
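  // (Sanity check on the arithmetic: one old entry plus 12 extra entries at
  // kWriteSize = 20 KB each is roughly 260 KB of payload against a 200 KB
  // budget, so eviction must run at least once. The exact watermarks are
  // backend internals; this is just the intuition.)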
  SetSimpleCacheMode();
  SetMaxSize(kMaxSize);
  InitCache();
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key1, &entry), IsOk());
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kWriteSize);
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);
  EXPECT_EQ(kWriteSize,
            WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  entry->Close();
  AddDelay();
  std::string key2("the key prefix");
  for (int i = 0; i < kNumExtraEntries; i++) {
    if (i == kNumExtraEntries - 2) {
      // Create a distinct timestamp for the last two entries. These entries
      // will be checked for outliving the eviction.
      AddDelay();
    }
    ASSERT_THAT(CreateEntry(key2 + base::NumberToString(i), &entry), IsOk());
    ScopedEntryPtr entry_closer(entry);
    EXPECT_EQ(kWriteSize,
              WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  }
  // TODO(pasko): Find a way to wait for the eviction task(s) to finish by using
  // the internal knowledge about |SimpleBackendImpl|.
  ASSERT_NE(net::OK, OpenEntry(key1, &entry))
      << "Should have evicted the old entry";
  for (int i = 0; i < 2; i++) {
    int entry_no = kNumExtraEntries - i - 1;
    // Generally there is no guarantee that at this point the background
    // eviction is finished. We are testing the positive case, i.e. eviction
    // never reaching these entries, which should be non-flaky.
    ASSERT_EQ(net::OK, OpenEntry(key2 + base::NumberToString(entry_no), &entry))
        << "Should not have evicted fresh entry " << entry_no;
    entry->Close();
  }
}

// Tests that if a read and an in-flight truncate that follows it are both in
// progress simultaneously, they can both complete successfully. See
// http://crbug.com/239223
TEST_F(DiskCacheEntryTest, SimpleCacheInFlightTruncate) {
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";
  // We use a very large entry size here to make sure this doesn't hit
  // the prefetch path for any conceivable setting. Hitting prefetch would
  // make us serve the read below from memory entirely on the I/O thread,
  // missing the point of the test, which covers two concurrent disk ops with
  // portions of the work happening on the worker pool.
  const int kBufferSize = 50000;
  scoped_refptr<net::IOBuffer> write_buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufferSize);
  CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_EQ(kBufferSize,
            WriteData(entry, 1, 0, write_buffer.get(), kBufferSize, false));
  entry->Close();
  entry = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ScopedEntryPtr entry_closer(entry);
  MessageLoopHelper helper;
  int expected = 0;
  // Make a short read.
  const int kReadBufferSize = 512;
  scoped_refptr<net::IOBuffer> read_buffer =
      base::MakeRefCounted<net::IOBuffer>(kReadBufferSize);
  CallbackTest read_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, read_buffer.get(), kReadBufferSize,
                            base::BindOnce(&CallbackTest::Run,
                                           base::Unretained(&read_callback))));
  ++expected;
  // Truncate the entry to the length of that read.
  scoped_refptr<net::IOBuffer> truncate_buffer =
      base::MakeRefCounted<net::IOBuffer>(kReadBufferSize);
  CacheTestFillBuffer(truncate_buffer->data(), kReadBufferSize, false);
  CallbackTest truncate_callback(&helper, false);
  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, truncate_buffer.get(), kReadBufferSize,
                       base::BindOnce(&CallbackTest::Run,
                                      base::Unretained(&truncate_callback)),
                       true));
  ++expected;
  // Wait for both the read and truncation to finish, and confirm that both
  // succeeded.
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(kReadBufferSize, read_callback.last_result());
  EXPECT_EQ(kReadBufferSize, truncate_callback.last_result());
  EXPECT_EQ(0,
            memcmp(write_buffer->data(), read_buffer->data(), kReadBufferSize));
}

// Tests that if a write and a read dependent on it are both in flight
// simultaneously, they can both complete successfully without erroneous
// early returns. See http://crbug.com/239223
TEST_F(DiskCacheEntryTest, SimpleCacheInFlightRead) {
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";
  EntryResult result =
      cache_->CreateEntry(key, net::HIGHEST, EntryResultCallback());
  ASSERT_EQ(net::OK, result.net_error());
  disk_cache::Entry* entry = result.ReleaseEntry();
  ScopedEntryPtr entry_closer(entry);
  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> write_buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufferSize);
  CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
  MessageLoopHelper helper;
  int expected = 0;
  CallbackTest write_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(1, 0, write_buffer.get(), kBufferSize,
                             base::BindOnce(&CallbackTest::Run,
                                            base::Unretained(&write_callback)),
                             true));
  ++expected;
  scoped_refptr<net::IOBuffer> read_buffer =
      base::MakeRefCounted<net::IOBuffer>(kBufferSize);
  CallbackTest read_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, read_buffer.get(), kBufferSize,
                            base::BindOnce(&CallbackTest::Run,
                                           base::Unretained(&read_callback))));
  ++expected;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(kBufferSize, write_callback.last_result());
  EXPECT_EQ(kBufferSize, read_callback.last_result());
  EXPECT_EQ(0, memcmp(write_buffer->data(), read_buffer->data(), kBufferSize));
}

TEST_F(DiskCacheEntryTest, SimpleCacheOpenCreateRaceWithNoIndex) {
  SetSimpleCacheMode();
  DisableSimpleCacheWaitForIndex();
  DisableIntegrityCheck();
  InitCache();
  // Assume the index is not initialized, which is likely, since we are
  // blocking the IO thread from executing the index finalization step.
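  // (Without an index, the Open below has to go to disk, finds no files for
  // the key's hash, and reports a miss, while the racing Create proceeds as
  // usual; at least that is the intended sequence here.)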
  TestEntryResultCompletionCallback cb1;
  TestEntryResultCompletionCallback cb2;
  EntryResult rv1 = cache_->OpenEntry("key", net::HIGHEST, cb1.callback());
  EntryResult rv2 = cache_->CreateEntry("key", net::HIGHEST, cb2.callback());
  rv1 = cb1.GetResult(std::move(rv1));
  EXPECT_THAT(rv1.net_error(), IsError(net::ERR_FAILED));
  rv2 = cb2.GetResult(std::move(rv2));
  ASSERT_THAT(rv2.net_error(), IsOk());
  disk_cache::Entry* entry2 = rv2.ReleaseEntry();
  // Try to get an alias for entry2. Open should succeed, and return the same
  // pointer.
  disk_cache::Entry* entry3 = nullptr;
  ASSERT_EQ(net::OK, OpenEntry("key", &entry3));
  EXPECT_EQ(entry3, entry2);
  entry2->Close();
  entry3->Close();
}

// Checking one more scenario of overlapped reading of a bad entry.
// Differs from |SimpleCacheMultipleReadersCheckCRC| only in the order of the
// last two reads.
TEST_F(DiskCacheEntryTest, SimpleCacheMultipleReadersCheckCRC2) {
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "key";
  int size = 50000;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, size));
  scoped_refptr<net::IOBuffer> read_buffer1 =
      base::MakeRefCounted<net::IOBuffer>(size);
  scoped_refptr<net::IOBuffer> read_buffer2 =
      base::MakeRefCounted<net::IOBuffer>(size);
  // Advance the first reader a little.
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ScopedEntryPtr entry_closer(entry);
  EXPECT_EQ(1, ReadData(entry, 1, 0, read_buffer1.get(), 1));
  // Advance the 2nd reader by the same amount.
  disk_cache::Entry* entry2 = nullptr;
  EXPECT_THAT(OpenEntry(key, &entry2), IsOk());
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_EQ(1, ReadData(entry2, 1, 0, read_buffer2.get(), 1));
  // Continue reading 1st.
  EXPECT_GT(0, ReadData(entry, 1, 1, read_buffer1.get(), size));
  // This read should fail as well because we have previous read failures.
  EXPECT_GT(0, ReadData(entry2, 1, 1, read_buffer2.get(), 1));
  DisableIntegrityCheck();
}

// Tests that if we sequentially read each subset of the data until all of it
// has been read, the CRC is calculated correctly and the reads succeed.
TEST_F(DiskCacheEntryTest, SimpleCacheReadCombineCRC) {
  // Test sequence:
  // Create, Write, Read (first half of data), Read (second half of data),
  // Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "the first key";
  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_NE(null, entry);
  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();
  disk_cache::Entry* entry2 = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry2), IsOk());
  EXPECT_EQ(entry, entry2);
  // Read the first half of the data.
  int offset = 0;
  int buf_len = kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1_read1 =
      base::MakeRefCounted<net::IOBuffer>(buf_len);
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read1.get(), buf_len));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), buf_len));
  // Read the second half of the data.
  offset = buf_len;
  buf_len = kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1_read2 =
      base::MakeRefCounted<net::IOBuffer>(buf_len);
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read2.get(), buf_len));
  char* buffer1_data = buffer1->data() + offset;
  EXPECT_EQ(0, memcmp(buffer1_data, buffer1_read2->data(), buf_len));
  // Check that we are not leaking.
  EXPECT_NE(entry, null);
  EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
  entry->Close();
  entry = nullptr;
}

// Tests that data written out of order can still be read back correctly. In
// this case the CRC will not be present.
TEST_F(DiskCacheEntryTest, SimpleCacheNonSequentialWrite) {
  // Test sequence:
  // Create, Write (second half of data), Write (first half of data), Read,
  // Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "the first key";
  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  char* buffer1_data = buffer1->data() + kHalfSize;
  memcpy(buffer2->data(), buffer1_data, kHalfSize);
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Close();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    EXPECT_NE(null, entry);
    int offset = kHalfSize;
    int buf_len = kHalfSize;
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    offset = 0;
    buf_len = kHalfSize;
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
    entry->Close();
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    scoped_refptr<net::IOBuffer> buffer1_read1 =
        base::MakeRefCounted<net::IOBuffer>(kSize);
    EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
    EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kSize));
    // Check that we are not leaking.
    ASSERT_NE(entry, null);
    EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
    entry->Close();
  }
}

// Test that changing stream1 size does not affect stream0 (stream0 and stream1
// are stored in the same file in Simple Cache).
TEST_F(DiskCacheEntryTest, SimpleCacheStream1SizeChanges) {
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry = nullptr;
  const std::string key("the key");
  const int kSize = 100;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buffer_read =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_TRUE(entry);
  // Write something into stream0.
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
  entry->Close();
  // Extend stream1.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  int stream1_size = 100;
  EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, false));
  EXPECT_EQ(stream1_size, entry->GetDataSize(1));
  entry->Close();
  // Check that stream0 data has not been modified and that the EOF record for
  // stream 0 contains a CRC.
  // The entry needs to be reopened before checking the CRC: Open will perform
  // the synchronization with the previous Close. This ensures the EOF records
  // have been written to disk before we attempt to read them independently.
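  // (Reminder of the rough file-0 layout in Simple Cache, to the best of our
  // understanding: header, key, stream 1 data, then stream 0 data with its
  // EOF record, CRC included, at the tail of the file. That layout is what
  // lets GetEOFOffsetInFile() below locate the record.)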
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  base::FilePath entry_file0_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  base::File entry_file0(entry_file0_path,
                         base::File::FLAG_READ | base::File::FLAG_OPEN);
  ASSERT_TRUE(entry_file0.IsValid());
  int data_size[disk_cache::kSimpleEntryStreamCount] = {kSize, stream1_size, 0};
  int sparse_data_size = 0;
  disk_cache::SimpleEntryStat entry_stat(
      base::Time::Now(), base::Time::Now(), data_size, sparse_data_size);
  int eof_offset = entry_stat.GetEOFOffsetInFile(key.size(), 0);
  disk_cache::SimpleFileEOF eof_record;
  ASSERT_EQ(static_cast<int>(sizeof(eof_record)),
            entry_file0.Read(eof_offset, reinterpret_cast<char*>(&eof_record),
                             sizeof(eof_record)));
  EXPECT_EQ(disk_cache::kSimpleFinalMagicNumber, eof_record.final_magic_number);
  EXPECT_TRUE((eof_record.flags & disk_cache::SimpleFileEOF::FLAG_HAS_CRC32) ==
              disk_cache::SimpleFileEOF::FLAG_HAS_CRC32);
  buffer_read = base::MakeRefCounted<net::IOBuffer>(kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
  // Shrink stream1.
  stream1_size = 50;
  EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, true));
  EXPECT_EQ(stream1_size, entry->GetDataSize(1));
  entry->Close();
  // Check that stream0 data has not been modified.
  buffer_read = base::MakeRefCounted<net::IOBuffer>(kSize);
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
  entry->Close();
  entry = nullptr;
}

// Test that writing within the range for which the CRC has already been
// computed will properly invalidate the computed CRC.
TEST_F(DiskCacheEntryTest, SimpleCacheCRCRewrite) {
  // Test sequence:
  // Create, Write (big data), Write (small data in the middle), Close.
  // Open, Read (all), Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "the first key";
  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kHalfSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kHalfSize, false);
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_NE(null, entry);
  entry->Close();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    int offset = 0;
    int buf_len = kSize;
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
    offset = kHalfSize;
    buf_len = kHalfSize;
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    scoped_refptr<net::IOBuffer> buffer1_read1 =
        base::MakeRefCounted<net::IOBuffer>(kSize);
    EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
    EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kHalfSize));
    EXPECT_EQ(0, memcmp(buffer2->data(), buffer1_read1->data() + kHalfSize,
                        kHalfSize));
    entry->Close();
  }
}
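
// Streams 0 and 1 share the "_0" file on disk, while stream 2 (the "third
// stream") lives in its own "_1" file, created lazily on the first
// significant write. The helper below checks for that file's presence.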
bool DiskCacheEntryTest::SimpleCacheThirdStreamFileExists(const char* key) {
  int third_stream_file_index =
      disk_cache::simple_util::GetFileIndexFromStreamIndex(2);
  base::FilePath third_stream_file_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(
          key, third_stream_file_index));
  return PathExists(third_stream_file_path);
}

void DiskCacheEntryTest::SyncDoomEntry(const char* key) {
  net::TestCompletionCallback callback;
  cache_->DoomEntry(key, net::HIGHEST, callback.callback());
  callback.WaitForResult();
}

void DiskCacheEntryTest::CreateEntryWithHeaderBodyAndSideData(
    const std::string& key,
    int data_size) {
  // Use one buffer for simplicity.
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(data_size);
  CacheTestFillBuffer(buffer->data(), data_size, false);
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(data_size, WriteData(entry, i, /* offset */ 0, buffer.get(),
                                   data_size, false));
  }
  entry->Close();
}

void DiskCacheEntryTest::TruncateFileFromEnd(int file_index,
                                             const std::string& key,
                                             int data_size,
                                             int truncate_size) {
  // Remove the last |truncate_size| bytes (e.g. an EOF record) from the cache
  // file.
  ASSERT_GT(data_size, truncate_size);
  const int64_t new_size =
      disk_cache::simple_util::GetFileSizeFromDataSize(key.size(), data_size) -
      truncate_size;
  const base::FilePath entry_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, file_index));
  EXPECT_TRUE(TruncatePath(entry_path, new_size));
}

void DiskCacheEntryTest::UseAfterBackendDestruction() {
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
  cache_.reset();
  const int kSize = 100;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  // Do some writes and reads, but don't check the results. We're OK
  // with them failing, just not with them crashing.
  WriteData(entry, 1, 0, buffer.get(), kSize, false);
  ReadData(entry, 1, 0, buffer.get(), kSize);
  WriteSparseData(entry, 20000, buffer.get(), kSize);
  entry->Close();
}

void DiskCacheEntryTest::CloseSparseAfterBackendDestruction() {
  const int kSize = 100;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("the first key", &entry), IsOk());
  WriteSparseData(entry, 20000, buffer.get(), kSize);
  cache_.reset();
  // This call shouldn't DCHECK or crash.
  entry->Close();
}

// Check that a newly-created entry with no third-stream writes omits the
// third stream file.
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream1) {
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "key";
  disk_cache::Entry* entry;
  // Create entry and close without writing: third stream file should be
  // omitted, since the stream is empty.
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Close();
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}

// Check that a newly-created entry with only a single zero-offset, zero-length
// write omits the third stream file.
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream2) {
  SetSimpleCacheMode();
  InitCache();
  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kHalfSize, false);
  disk_cache::Entry* entry;
  // Create entry, write empty buffer to third stream, and close: third stream
  // should still be omitted, since the entry ignores writes that don't modify
  // data or change the length.
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_EQ(0, WriteData(entry, 2, 0, buffer.get(), 0, true));
  entry->Close();
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}

// Check that we can read back data written to the third stream.
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream3) {
  SetSimpleCacheMode();
  InitCache();
  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
  disk_cache::Entry* entry;
  // Create entry, write data to third stream, and close: third stream should
  // not be omitted, since it contains data. Re-open entry and ensure there
  // are that many bytes in the third stream.
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1.get(), kHalfSize, true));
  entry->Close();
  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_EQ(kHalfSize, ReadData(entry, 2, 0, buffer2.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kHalfSize));
  entry->Close();
  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}

// Check that we remove the third stream file upon opening an entry and finding
// the third stream empty. (This is the upgrade path for entries written
// before the third stream was optional.)
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream4) {
  SetSimpleCacheMode();
  InitCache();
  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
  disk_cache::Entry* entry;
  // Create entry, write data to third stream, truncate third stream back to
  // empty, and close: third stream will not initially be omitted, since entry
  // creates the file when the first significant write comes in, and only
  // removes it on open if it is empty. Reopen, ensure that the file is
  // deleted, and that there's no data in the third stream.
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1.get(), kHalfSize, true));
  EXPECT_EQ(0, WriteData(entry, 2, 0, buffer1.get(), 0, true));
  entry->Close();
  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
  EXPECT_EQ(0, ReadData(entry, 2, 0, buffer2.get(), kSize));
  entry->Close();
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}

// Check that we don't accidentally create the third stream file once the entry
// has been doomed.
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream5) {
  SetSimpleCacheMode();
  InitCache();
  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kHalfSize, false);
  disk_cache::Entry* entry;
  // Create entry, doom entry, write data to third stream, and close: third
  // stream should not exist. (Note: We don't care if the write fails, just
  // that it doesn't cause the file to be created on disk.)
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Doom();
  WriteData(entry, 2, 0, buffer.get(), kHalfSize, true);
  entry->Close();
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}

// There could be a race between Doom and an optimistic write.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomOptimisticWritesRace) {
  // Test sequence:
  // Create, first Write, second Write, Close.
  // Open, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "the first key";
  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kSize, false);
  // The race only happens on stream 1 and stream 2.
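  // (Our reading of the comment above, not something the test itself asserts:
  // stream 0 is buffered in memory by SimpleEntryImpl and flushed at Close,
  // so only streams 1 and 2 go through the disk queue where the race can
  // occur; the loop still covers all streams for completeness.)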
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_THAT(DoomAllEntries(), IsOk());
    disk_cache::Entry* entry = nullptr;
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    EXPECT_NE(null, entry);
    entry->Close();
    entry = nullptr;
    ASSERT_THAT(DoomAllEntries(), IsOk());
    ASSERT_THAT(CreateEntry(key, &entry), IsOk());
    EXPECT_NE(null, entry);
    int offset = 0;
    int buf_len = kSize;
    // This write should not be optimistic (since create is).
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
    offset = kSize;
    // This write should be optimistic.
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    EXPECT_NE(null, entry);
    entry->Close();
    entry = nullptr;
  }
}

// Tests for a regression in crbug.com/317138, in which deleting an already
// doomed entry was removing the active entry from the index.
TEST_F(DiskCacheEntryTest, SimpleCachePreserveActiveEntries) {
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = nullptr;
  const char key[] = "this is a key";
  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry1), IsOk());
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);
  entry1->Doom();
  disk_cache::Entry* entry2 = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry2), IsOk());
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);
  entry2_closer.reset();
  // Closing then reopening entry2 ensures that entry2 is serialized, and so
  // it can be opened from files without error.
  entry2 = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry2), IsOk());
  EXPECT_NE(null, entry2);
  entry2_closer.reset(entry2);
  scoped_refptr<disk_cache::SimpleEntryImpl> entry1_refptr =
      static_cast<disk_cache::SimpleEntryImpl*>(entry1);
  // If crbug.com/317138 has regressed, this will remove |entry2| from
  // the backend's |active_entries_| while |entry2| is still alive and its
  // files are still on disk.
  entry1_closer.reset();
  entry1 = nullptr;
  // Close does not have a callback. However, we need to be sure the close is
  // finished before we continue the test. We can take advantage of how the ref
  // counting of a SimpleEntryImpl works to fake out a callback: When the
  // last Close() call is made to an entry, an IO operation is sent to the
  // synchronous entry to close the platform files. This IO operation holds a
  // ref pointer to the entry, which expires when the operation is done. So,
  // we take a refpointer, and watch the SimpleEntry object until it has only
  // one ref; this indicates the IO operation is complete.
  while (!entry1_refptr->HasOneRef()) {
    base::PlatformThread::YieldCurrentThread();
    base::RunLoop().RunUntilIdle();
  }
  entry1_refptr = nullptr;
  // In the bug case, this new entry ends up being a duplicate object pointing
  // at the same underlying files.
  disk_cache::Entry* entry3 = nullptr;
  EXPECT_THAT(OpenEntry(key, &entry3), IsOk());
  ScopedEntryPtr entry3_closer(entry3);
  EXPECT_NE(null, entry3);
  // The test passes if these two dooms do not crash.
  entry2->Doom();
  entry3->Doom();
}

TEST_F(DiskCacheEntryTest, SimpleCacheBasicSparseIO) {
  SetSimpleCacheMode();
  InitCache();
  BasicSparseIO();
}

TEST_F(DiskCacheEntryTest, SimpleCacheHugeSparseIO) {
  SetSimpleCacheMode();
  InitCache();
  HugeSparseIO();
}

TEST_F(DiskCacheEntryTest, SimpleCacheGetAvailableRange) {
  SetSimpleCacheMode();
  InitCache();
  GetAvailableRangeTest();
}

TEST_F(DiskCacheEntryTest, SimpleCacheUpdateSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  UpdateSparseEntry();
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomSparseEntry();
}

TEST_F(DiskCacheEntryTest, SimpleCachePartialSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  PartialSparseEntry();
}

TEST_F(DiskCacheEntryTest, SimpleCacheTruncateLargeSparseFile) {
  const int kSize = 1024;
  SetSimpleCacheMode();
  // An entry is allowed sparse data 1/10 the size of the cache, so this size
  // allows for one |kSize|-sized range plus overhead, but not two ranges.
  SetMaxSize(kSize * 15);
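  // (Worked out: the per-entry sparse budget is then 15 * 1024 / 10 = 1536
  // bytes, enough for one 1024-byte range plus bookkeeping, but not for two.)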
  InitCache();
  const char key[] = "key";
  disk_cache::Entry* null = nullptr;
  disk_cache::Entry* entry;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  EXPECT_NE(null, entry);
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  net::TestCompletionCallback callback;
  int ret;
  // Verify initial conditions.
  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));
  ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));
  // Write a range and make sure it reads back.
  ret = entry->WriteSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));
  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));
  // Write another range and make sure it reads back.
  ret = entry->WriteSparseData(kSize, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));
  ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));
  // Make sure the first range was removed when the second was written.
  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));
  // Close and reopen the entry and make sure the first range is still absent
  // and the second range is still present.
  entry->Close();
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));
  ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, SimpleCacheNoBodyEOF) {
  SetSimpleCacheMode();
  InitCache();
  const std::string key("the first key");
  const int kSize = 1024;
  CreateEntryWithHeaderBodyAndSideData(key, kSize);
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  entry->Close();
  TruncateFileFromEnd(0 /*header and body file index*/, key, kSize,
                      static_cast<int>(sizeof(disk_cache::SimpleFileEOF)));
  EXPECT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));
}

TEST_F(DiskCacheEntryTest, SimpleCacheNoSideDataEOF) {
  SetSimpleCacheMode();
  InitCache();
  const char key[] = "the first key";
  const int kSize = 1024;
  CreateEntryWithHeaderBodyAndSideData(key, kSize);
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  entry->Close();
  TruncateFileFromEnd(1 /*side data file_index*/, key, kSize,
                      static_cast<int>(sizeof(disk_cache::SimpleFileEOF)));
  EXPECT_THAT(OpenEntry(key, &entry), IsOk());
  // The corrupted stream should have been deleted.
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
  // _0 should still exist.
  base::FilePath path_0 = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  EXPECT_TRUE(base::PathExists(path_0));
  scoped_refptr<net::IOBuffer> check_stream_data =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, check_stream_data.get(), kSize));
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, check_stream_data.get(), kSize));
  EXPECT_EQ(0, entry->GetDataSize(2));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, SimpleCacheReadWithoutKeySHA256) {
  // This test runs as APP_CACHE to make operations more synchronous.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
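  // (Background, to the best of our understanding: newer entries append a
  // SHA-256 of the key to the stream 0 EOF record, while older entries lack
  // it. RemoveKeySHA256FromEntry() below rewrites the files into the older
  // form, and the entry must still open and read correctly.)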
  disk_cache::Entry* entry;
  std::string key("a key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  const std::string stream_0_data = "data for stream zero";
  scoped_refptr<net::IOBuffer> stream_0_iobuffer =
      base::MakeRefCounted<net::StringIOBuffer>(stream_0_data);
  EXPECT_EQ(static_cast<int>(stream_0_data.size()),
            WriteData(entry, 0, 0, stream_0_iobuffer.get(),
                      stream_0_data.size(), false));
  const std::string stream_1_data = "FOR STREAM ONE, QUITE DIFFERENT THINGS";
  scoped_refptr<net::IOBuffer> stream_1_iobuffer =
      base::MakeRefCounted<net::StringIOBuffer>(stream_1_data);
  EXPECT_EQ(static_cast<int>(stream_1_data.size()),
            WriteData(entry, 1, 0, stream_1_iobuffer.get(),
                      stream_1_data.size(), false));
  entry->Close();
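  // Flush both the IO thread and the cache thread, so that the entry's files
  // are fully written and closed before we rewrite them behind the backend's
  // back.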
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(
      disk_cache::simple_util::RemoveKeySHA256FromEntry(key, cache_path_));
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  ScopedEntryPtr entry_closer(entry);
  EXPECT_EQ(static_cast<int>(stream_0_data.size()), entry->GetDataSize(0));
  scoped_refptr<net::IOBuffer> check_stream_0_data =
      base::MakeRefCounted<net::IOBuffer>(stream_0_data.size());
  EXPECT_EQ(
      static_cast<int>(stream_0_data.size()),
      ReadData(entry, 0, 0, check_stream_0_data.get(), stream_0_data.size()));
  EXPECT_EQ(0, stream_0_data.compare(0, std::string::npos,
                                     check_stream_0_data->data(),
                                     stream_0_data.size()));
  EXPECT_EQ(static_cast<int>(stream_1_data.size()), entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> check_stream_1_data =
      base::MakeRefCounted<net::IOBuffer>(stream_1_data.size());
  EXPECT_EQ(
      static_cast<int>(stream_1_data.size()),
      ReadData(entry, 1, 0, check_stream_1_data.get(), stream_1_data.size()));
  EXPECT_EQ(0, stream_1_data.compare(0, std::string::npos,
                                     check_stream_1_data->data(),
                                     stream_1_data.size()));
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoubleOpenWithoutKeySHA256) {
  // This test runs as APP_CACHE to make operations more synchronous.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  std::string key("a key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Close();
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(
      disk_cache::simple_util::RemoveKeySHA256FromEntry(key, cache_path_));
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  entry->Close();
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  entry->Close();
}

TEST_F(DiskCacheEntryTest, SimpleCacheReadCorruptKeySHA256) {
  // This test runs as APP_CACHE to make operations more synchronous.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  std::string key("a key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Close();
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(
      disk_cache::simple_util::CorruptKeySHA256FromEntry(key, cache_path_));
  EXPECT_NE(net::OK, OpenEntry(key, &entry));
}

TEST_F(DiskCacheEntryTest, SimpleCacheReadCorruptLength) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  std::string key("a key");
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Close();
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();
  EXPECT_TRUE(
      disk_cache::simple_util::CorruptStream0LengthFromEntry(key, cache_path_));
  EXPECT_NE(net::OK, OpenEntry(key, &entry));
}

TEST_F(DiskCacheEntryTest, SimpleCacheCreateRecoverFromRmdir) {
  // This test runs as APP_CACHE to make operations more synchronous.
  // (in particular we want to see if create succeeded or not, so we don't
  // want an optimistic one).
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  // Pretend someone deleted the cache dir. This shouldn't be too scary in
  // the test since cache_path_ is set as:
  //   CHECK(temp_dir_.CreateUniqueTempDir());
  //   cache_path_ = temp_dir_.GetPath().AppendASCII("cache");
  disk_cache::DeleteCache(cache_path_,
                          true /* delete the dir, what we really want*/);
  disk_cache::Entry* entry;
  std::string key("a key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  entry->Close();
}

TEST_F(DiskCacheEntryTest, SimpleCacheSparseErrorHandling) {
  // If there is corruption in the sparse file, we should delete all the files
  // before returning failure. Further sparse operations in the failed state
  // should fail gracefully.
  SetSimpleCacheMode();
  InitCache();
  std::string key("a key");
  disk_cache::SimpleFileTracker::EntryFileKey num_key(
      disk_cache::simple_util::GetEntryHashKey(key));
  base::FilePath path_0 = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromEntryFileKeyAndFileIndex(num_key,
                                                                       0));
  base::FilePath path_s = cache_path_.AppendASCII(
      disk_cache::simple_util::GetSparseFilenameFromEntryFileKey(num_key));
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  const int kSize = 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, WriteSparseData(entry, 0, buffer.get(), kSize));
  entry->Close();
  disk_cache::FlushCacheThreadForTesting();
  EXPECT_TRUE(base::PathExists(path_0));
  EXPECT_TRUE(base::PathExists(path_s));
  // Now corrupt the _s file in a way that makes it look OK on open, but not on
  // read.
  base::File file_s(path_s, base::File::FLAG_OPEN | base::File::FLAG_READ |
                                base::File::FLAG_WRITE);
  ASSERT_TRUE(file_s.IsValid());
  file_s.SetLength(sizeof(disk_cache::SimpleFileHeader) +
                   sizeof(disk_cache::SimpleFileSparseRangeHeader) +
                   key.size());
  file_s.Close();
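  // (The length chosen above keeps the sparse-file header, the key, and one
  // range header, but drops the range payload itself; that is what lets the
  // file pass the open-time check yet fail once the range is actually read.)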
  // Re-open; it should still be fine.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  // Read should fail, though.
  EXPECT_EQ(net::ERR_CACHE_READ_FAILURE,
            ReadSparseData(entry, 0, buffer.get(), kSize));
  // By the time the read returns to us, the files should already be gone.
  EXPECT_FALSE(base::PathExists(path_0));
  EXPECT_FALSE(base::PathExists(path_s));
  // Retrying should still fail gracefully, not DCHECK-fail.
  EXPECT_EQ(net::ERR_FAILED, ReadSparseData(entry, 0, buffer.get(), kSize));
  // Similarly for other ops.
  EXPECT_EQ(net::ERR_FAILED, WriteSparseData(entry, 0, buffer.get(), kSize));
  net::TestCompletionCallback cb;
  TestRangeResultCompletionCallback range_cb;
  RangeResult result = range_cb.GetResult(
      entry->GetAvailableRange(0, 1024, range_cb.callback()));
  EXPECT_EQ(net::ERR_FAILED, result.net_error);
  entry->Close();
  disk_cache::FlushCacheThreadForTesting();
  // Closing shouldn't resurrect the files, either.
  EXPECT_FALSE(base::PathExists(path_0));
  EXPECT_FALSE(base::PathExists(path_s));
}

TEST_F(DiskCacheEntryTest, SimpleCacheCreateCollision) {
  // These two keys collide; this test checks that we properly handle creation
  // of both.
  const char kCollKey1[] =
      "\xfb\x4e\x9c\x1d\x66\x71\xf7\x54\xa3\x11\xa0\x7e\x16\xa5\x68\xf6";
  const char kCollKey2[] =
      "\xbc\x60\x64\x92\xbc\xa0\x5c\x15\x17\x93\x29\x2d\xe4\x21\xbd\x03";
  const int kSize = 256;
  scoped_refptr<net::IOBuffer> buffer1 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  scoped_refptr<net::IOBuffer> read_buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kSize, false);
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry1;
  ASSERT_THAT(CreateEntry(kCollKey1, &entry1), IsOk());
  disk_cache::Entry* entry2;
  ASSERT_THAT(CreateEntry(kCollKey2, &entry2), IsOk());
  // Make sure that entry was actually created and we didn't just succeed
  // optimistically. (Oddly I can't seem to hit the sequence of events required
  // for the bug that used to be here if I just set this to APP_CACHE).
  EXPECT_EQ(kSize, WriteData(entry2, 0, 0, buffer2.get(), kSize, false));
  // entry1 is still usable, though, and distinct (we just won't be able to
  // re-open it).
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, read_buffer.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), read_buffer->data(), kSize));
  EXPECT_EQ(kSize, ReadData(entry2, 0, 0, read_buffer.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer2->data(), read_buffer->data(), kSize));
  entry1->Close();
  entry2->Close();
}

TEST_F(DiskCacheEntryTest, SimpleCacheConvertToSparseStream2LeftOver) {
  // Testcase for what happens when we have a sparse stream and a left-over
  // empty stream 2 file.
  const int kSize = 10;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  std::string key("a key");
  ASSERT_THAT(CreateEntry(key, &entry), IsOk());
  // Create an empty stream 2. To do that, we first make a non-empty one, then
  // truncate it (since otherwise the write would just get ignored).
  EXPECT_EQ(kSize, WriteData(entry, /* stream = */ 2, /* offset = */ 0,
                             buffer.get(), kSize, false));
  EXPECT_EQ(0, WriteData(entry, /* stream = */ 2, /* offset = */ 0,
                         buffer.get(), 0, true));
  EXPECT_EQ(kSize, WriteSparseData(entry, 5, buffer.get(), kSize));
  entry->Close();
  // Reopen, and try to get the sparse data back.
  ASSERT_THAT(OpenEntry(key, &entry), IsOk());
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  EXPECT_EQ(kSize, ReadSparseData(entry, 5, buffer2.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer2->data(), kSize));
  entry->Close();
}
  4362. TEST_F(DiskCacheEntryTest, SimpleCacheLazyStream2CreateFailure) {
  4363. // Testcase for what happens when lazy-creation of stream 2 fails.
  4364. const int kSize = 10;
  4365. scoped_refptr<net::IOBuffer> buffer =
  4366. base::MakeRefCounted<net::IOBuffer>(kSize);
  4367. CacheTestFillBuffer(buffer->data(), kSize, false);
  4368. // Synchronous ops, for ease of disk state;
  4369. SetCacheType(net::APP_CACHE);
  4370. SetSimpleCacheMode();
  4371. InitCache();
  4372. const char kKey[] = "a key";
  4373. disk_cache::Entry* entry = nullptr;
  4374. ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  4375. // Create _1 file for stream 2; this should inject a failure when the cache
  4376. // tries to create it itself.
  4377. base::FilePath entry_file1_path = cache_path_.AppendASCII(
  4378. disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(kKey, 1));
  4379. base::File entry_file1(entry_file1_path,
  4380. base::File::FLAG_WRITE | base::File::FLAG_CREATE);
  4381. ASSERT_TRUE(entry_file1.IsValid());
  4382. entry_file1.Close();
  EXPECT_EQ(net::ERR_CACHE_WRITE_FAILURE,
            WriteData(entry, /* index = */ 2, /* offset = */ 0, buffer.get(),
                      kSize, /* truncate = */ false));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, SimpleCacheChecksumScrewUp) {
  // Test for a bug that occurred during development of moving CRC
  // computation off the I/O thread.
  const int kSize = 10;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);
  const int kDoubleSize = kSize * 2;
  scoped_refptr<net::IOBuffer> big_buffer =
      base::MakeRefCounted<net::IOBuffer>(kDoubleSize);
  CacheTestFillBuffer(big_buffer->data(), kDoubleSize, false);

  SetSimpleCacheMode();
  InitCache();

  const char kKey[] = "a key";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());

  // Write out big_buffer for the double range. Checksum will be set to this.
  ASSERT_EQ(kDoubleSize,
            WriteData(entry, 1, 0, big_buffer.get(), kDoubleSize, false));

  // Reset remembered position to 0 by writing at an earlier non-zero offset.
  ASSERT_EQ(1, WriteData(entry, /* stream = */ 1, /* offset = */ 1,
                         big_buffer.get(), /* len = */ 1, false));
  // Now write out the half-range twice. An intermediate revision would
  // incorrectly compute the checksum as if the payload were |buffer| followed
  // by |buffer|, rather than |buffer| followed by the end of |big_buffer|.
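  // Roughly -- this is a sketch of the idea, not the actual implementation --
  // the stream checksum is maintained incrementally, zlib-style:
  //   uint32_t crc = crc32(0L, Z_NULL, 0);
  //   crc = crc32(crc, data, len);  // extended as the stream grows
  // so a write at or before the remembered position has to fold in the
  // retained tail of the old data rather than just appending.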
  ASSERT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, false));
  ASSERT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, false));
  entry->Close();

  ASSERT_THAT(OpenEntry(kKey, &entry), IsOk());
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer2->data(), kSize));
  EXPECT_EQ(kSize, ReadData(entry, 1, kSize, buffer2.get(), kSize));
  EXPECT_EQ(0, memcmp(big_buffer->data() + kSize, buffer2->data(), kSize));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, SimpleUseAfterBackendDestruction) {
  SetSimpleCacheMode();
  InitCache();
  UseAfterBackendDestruction();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyUseAfterBackendDestruction) {
  // https://crbug.com/741620
  SetMemoryOnlyMode();
  InitCache();
  UseAfterBackendDestruction();
}

TEST_F(DiskCacheEntryTest, SimpleCloseSparseAfterBackendDestruction) {
  SetSimpleCacheMode();
  InitCache();
  CloseSparseAfterBackendDestruction();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyCloseSparseAfterBackendDestruction) {
  // https://crbug.com/946434
  SetMemoryOnlyMode();
  InitCache();
  CloseSparseAfterBackendDestruction();
}

void DiskCacheEntryTest::LastUsedTimePersists() {
  // Make sure that SetLastUsedTimeForTest persists. When used with
  // SimpleCache, this also checks that Entry::GetLastUsed is based on
  // information in the index, when available, rather than the on-disk atime,
  // which can be inaccurate.
  const char kKey[] = "a key";
  InitCache();
  disk_cache::Entry* entry1 = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry1), IsOk());
  ASSERT_TRUE(nullptr != entry1);
  base::Time modified_last_used = entry1->GetLastUsed() - base::Minutes(5);
  entry1->SetLastUsedTimeForTest(modified_last_used);
  entry1->Close();

  disk_cache::Entry* entry2 = nullptr;
  ASSERT_THAT(OpenEntry(kKey, &entry2), IsOk());
  ASSERT_TRUE(nullptr != entry2);

  base::TimeDelta diff = modified_last_used - entry2->GetLastUsed();
  EXPECT_LT(diff, base::Seconds(2));
  EXPECT_GT(diff, -base::Seconds(2));
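  // (The +/- 2 second window is tolerance, presumably for backends that
  // persist timestamps at coarse granularity; the exact precision is an
  // assumption here.)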
  entry2->Close();
}

TEST_F(DiskCacheEntryTest, LastUsedTimePersists) {
  LastUsedTimePersists();
}

TEST_F(DiskCacheEntryTest, SimpleLastUsedTimePersists) {
  SetSimpleCacheMode();
  LastUsedTimePersists();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyLastUsedTimePersists) {
  SetMemoryOnlyMode();
  LastUsedTimePersists();
}

void DiskCacheEntryTest::TruncateBackwards() {
  const char kKey[] = "a key";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);

  const int kBigSize = 40 * 1024;
  const int kSmallSize = 9727;

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kBigSize);
  CacheTestFillBuffer(buffer->data(), kBigSize, false);
  scoped_refptr<net::IOBuffer> read_buf =
      base::MakeRefCounted<net::IOBuffer>(kBigSize);

  ASSERT_EQ(kSmallSize, WriteData(entry, /* index = */ 0,
                                  /* offset = */ kBigSize, buffer.get(),
                                  /* size = */ kSmallSize,
                                  /* truncate = */ false));
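  // The entry is now kBigSize + kSmallSize long, with everything before
  // offset kBigSize unwritten.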
  memset(read_buf->data(), 0, kBigSize);
  ASSERT_EQ(kSmallSize, ReadData(entry, /* index = */ 0,
                                 /* offset = */ kBigSize, read_buf.get(),
                                 /* size = */ kSmallSize));
  EXPECT_EQ(0, memcmp(read_buf->data(), buffer->data(), kSmallSize));

  // A partly overlapping truncate before the previous write.
  ASSERT_EQ(kBigSize,
            WriteData(entry, /* index = */ 0,
                      /* offset = */ 3, buffer.get(), /* size = */ kBigSize,
                      /* truncate = */ true));
  memset(read_buf->data(), 0, kBigSize);
  ASSERT_EQ(kBigSize,
            ReadData(entry, /* index = */ 0,
                     /* offset = */ 3, read_buf.get(), /* size = */ kBigSize));
  EXPECT_EQ(0, memcmp(read_buf->data(), buffer->data(), kBigSize));
  EXPECT_EQ(kBigSize + 3, entry->GetDataSize(0));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, TruncateBackwards) {
  // https://crbug.com/946539/
  InitCache();
  TruncateBackwards();
}

TEST_F(DiskCacheEntryTest, SimpleTruncateBackwards) {
  SetSimpleCacheMode();
  InitCache();
  TruncateBackwards();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateBackwards) {
  SetMemoryOnlyMode();
  InitCache();
  TruncateBackwards();
}

void DiskCacheEntryTest::ZeroWriteBackwards() {
  const char kKey[] = "a key";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);

  const int kSize = 1024;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  // Offset here needs to be > blockfile's kMaxBlockSize to hit
  // https://crbug.com/946538, as writes close to beginning are handled
  // specially.
  EXPECT_EQ(0, WriteData(entry, /* index = */ 0,
                         /* offset = */ 17000, buffer.get(),
                         /* size = */ 0, /* truncate = */ true));
  EXPECT_EQ(0, WriteData(entry, /* index = */ 0,
                         /* offset = */ 0, buffer.get(),
                         /* size = */ 0, /* truncate = */ false));
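  // The truncating zero-length write extended the entry to 17000 bytes
  // without writing any payload, so the prefix should read back as zeros.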
  EXPECT_EQ(kSize, ReadData(entry, /* index = */ 0,
                            /* offset = */ 0, buffer.get(),
                            /* size = */ kSize));
  for (int i = 0; i < kSize; ++i) {
    EXPECT_EQ(0, buffer->data()[i]) << i;
  }
  entry->Close();
}

TEST_F(DiskCacheEntryTest, ZeroWriteBackwards) {
  // https://crbug.com/946538/
  InitCache();
  ZeroWriteBackwards();
}

TEST_F(DiskCacheEntryTest, SimpleZeroWriteBackwards) {
  SetSimpleCacheMode();
  InitCache();
  ZeroWriteBackwards();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyZeroWriteBackwards) {
  SetMemoryOnlyMode();
  InitCache();
  ZeroWriteBackwards();
}

void DiskCacheEntryTest::SparseOffset64Bit() {
  // Offsets to sparse ops are 64-bit, so make sure we keep track of all the
  // bits. (Or, at least in the blockfile case, fail cleanly, since it has a
  // much lower cap on the maximum offset.)
  bool blockfile = !memory_only_ && !simple_cache_mode_;
  InitCache();

  const char kKey[] = "a key";
  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);

  const int kSize = 1024;
  // One bit set very high, so intermediate truncations to 32-bit would drop it
  // even if they happen after a bunch of shifting right.
  const int64_t kOffset = (1ll << 61);

  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  EXPECT_EQ(blockfile ? net::ERR_CACHE_OPERATION_NOT_SUPPORTED : kSize,
            WriteSparseData(entry, kOffset, buffer.get(), kSize));

  int64_t start_out = -1;
  EXPECT_EQ(0, GetAvailableRange(entry, /* offset = */ 0, kSize, &start_out));
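  // Nothing is available near the start of the entry; the only written data
  // is at kOffset.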
  start_out = -1;
  EXPECT_EQ(blockfile ? 0 : kSize,
            GetAvailableRange(entry, kOffset, kSize, &start_out));
  EXPECT_EQ(kOffset, start_out);
  entry->Close();
}

TEST_F(DiskCacheEntryTest, SparseOffset64Bit) {
  InitCache();
  SparseOffset64Bit();
}

TEST_F(DiskCacheEntryTest, SimpleSparseOffset64Bit) {
  SetSimpleCacheMode();
  InitCache();
  SparseOffset64Bit();
}

TEST_F(DiskCacheEntryTest, MemoryOnlySparseOffset64Bit) {
  // https://crbug.com/946436
  SetMemoryOnlyMode();
  InitCache();
  SparseOffset64Bit();
}

TEST_F(DiskCacheEntryTest, SimpleCacheCloseResurrection) {
  const int kSize = 10;
  scoped_refptr<net::IOBuffer> buffer =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  const char kKey[] = "key";
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry(kKey, &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);

  // Let optimistic create finish.
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();
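  // (Pumping the current loop, flushing the cache thread, then pumping again
  // lets the disk-side work and its completion callbacks both run.)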
  int rv = entry->WriteData(1, 0, buffer.get(), kSize,
                            net::CompletionOnceCallback(), false);
  // Write should be optimistic.
  ASSERT_EQ(kSize, rv);

  // Since the write is still pending, the open will get queued...
  TestEntryResultCompletionCallback cb_open;
  EntryResult result2 =
      cache_->OpenEntry(kKey, net::HIGHEST, cb_open.callback());
  EXPECT_EQ(net::ERR_IO_PENDING, result2.net_error());

  // ... as the open is queued, this Close will temporarily reduce the number
  // of external references to 0. This should not break things.
  entry->Close();

  // Wait till open finishes.
  result2 = cb_open.GetResult(std::move(result2));
  ASSERT_EQ(net::OK, result2.net_error());
  disk_cache::Entry* entry2 = result2.ReleaseEntry();
  ASSERT_TRUE(entry2 != nullptr);

  // Give the first Close a chance to finish.
  base::RunLoop().RunUntilIdle();
  disk_cache::FlushCacheThreadForTesting();
  base::RunLoop().RunUntilIdle();

  // Make sure |entry2| is still usable.
  scoped_refptr<net::IOBuffer> buffer2 =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  memset(buffer2->data(), 0, kSize);
  EXPECT_EQ(kSize, ReadData(entry2, 1, 0, buffer2.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer2->data(), kSize));
  entry2->Close();
}

TEST_F(DiskCacheEntryTest, BlockFileSparsePendingAfterDtor) {
  // Test of behavior of ~EntryImpl for sparse entry that runs after backend
  // destruction.
  //
  // Hand-creating the backend for realistic shutdown behavior.
  CleanupCacheDir();
  CreateBackend(disk_cache::kNone);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_TRUE(entry != nullptr);

  const int kSize = 61184;
  scoped_refptr<net::IOBuffer> buf =
      base::MakeRefCounted<net::IOBuffer>(kSize);
  CacheTestFillBuffer(buf->data(), kSize, false);

  // The write pattern here avoids the second write being handled by the
  // buffering layer, making SparseControl have to deal with its asynchrony.
  EXPECT_EQ(1, WriteSparseData(entry, 65535, buf.get(), 1));
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteSparseData(2560, buf.get(), kSize, base::DoNothing()));
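  // That second write is still in flight when the entry is closed and the
  // backend destroyed below; ~EntryImpl has to deal with the pending sparse
  // operation cleanly.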
  entry->Close();
  cache_.reset();

  // Create a new instance as a way of flushing the thread.
  InitCache();
  FlushQueueForTest();
}

class DiskCacheSimplePrefetchTest : public DiskCacheEntryTest {
 public:
  DiskCacheSimplePrefetchTest() = default;

  enum { kEntrySize = 1024 };

  void SetUp() override {
    payload_ = base::MakeRefCounted<net::IOBuffer>(kEntrySize);
    CacheTestFillBuffer(payload_->data(), kEntrySize, false);
    DiskCacheEntryTest::SetUp();
  }

  void SetupFullAndTrailerPrefetch(int full_size,
                                   int trailer_speculative_size) {
    std::map<std::string, std::string> params;
    params[disk_cache::kSimpleCacheFullPrefetchBytesParam] =
        base::NumberToString(full_size);
    params[disk_cache::kSimpleCacheTrailerPrefetchSpeculativeBytesParam] =
        base::NumberToString(trailer_speculative_size);
    scoped_feature_list_.InitAndEnableFeatureWithParameters(
        disk_cache::kSimpleCachePrefetchExperiment, params);
  }
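
  // As exercised by the tests below: |full_size| is the file-size threshold
  // at or below which the entire entry file is prefetched at open time, while
  // |trailer_speculative_size| speculatively reads that many bytes from the
  // end of the file in the hope of capturing the entry's metadata trailer.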

  void SetupFullPrefetch(int size) { SetupFullAndTrailerPrefetch(size, 0); }

  void InitCacheAndCreateEntry(const std::string& key) {
    SetSimpleCacheMode();
    SetCacheType(SimpleCacheType());
    InitCache();

    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    // Use stream 1, since that's what the new prefetch logic is about.
    ASSERT_EQ(kEntrySize,
              WriteData(entry, 1, 0, payload_.get(), kEntrySize, false));
    entry->Close();
  }

  virtual net::CacheType SimpleCacheType() const { return net::DISK_CACHE; }

  void InitCacheAndCreateEntryWithNoCrc(const std::string& key) {
    const int kHalfSize = kEntrySize / 2;
    const int kRemSize = kEntrySize - kHalfSize;

    SetSimpleCacheMode();
    InitCache();
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    // Use stream 1, since that's what the new prefetch logic is about.
    ASSERT_EQ(kEntrySize,
              WriteData(entry, 1, 0, payload_.get(), kEntrySize, false));
    // Overwrite the later part of the buffer, since we can't keep track of
    // the checksum in that case. Do it with identical contents, though, so
    // that the only difference between here and InitCacheAndCreateEntry()
    // is whether the result has a checksum or not.
    scoped_refptr<net::IOBuffer> second_half =
        base::MakeRefCounted<net::IOBuffer>(kRemSize);
    memcpy(second_half->data(), payload_->data() + kHalfSize, kRemSize);
    ASSERT_EQ(kRemSize, WriteData(entry, 1, kHalfSize, second_half.get(),
                                  kRemSize, false));
    entry->Close();
  }

  void TryRead(const std::string& key, bool expect_preread_stream1) {
    disk_cache::Entry* entry = nullptr;
    ASSERT_THAT(OpenEntry(key, &entry), IsOk());
    scoped_refptr<net::IOBuffer> read_buf =
        base::MakeRefCounted<net::IOBuffer>(kEntrySize);
    net::TestCompletionCallback cb;
    int rv = entry->ReadData(1, 0, read_buf.get(), kEntrySize, cb.callback());

    // If the preload happened, a synchronous reply is expected.
    if (expect_preread_stream1)
      EXPECT_EQ(kEntrySize, rv);
    else
      EXPECT_EQ(net::ERR_IO_PENDING, rv);
    rv = cb.GetResult(rv);
    EXPECT_EQ(kEntrySize, rv);
    EXPECT_EQ(0, memcmp(read_buf->data(), payload_->data(), kEntrySize));
    entry->Close();
  }

 protected:
  scoped_refptr<net::IOBuffer> payload_;
  base::test::ScopedFeatureList scoped_feature_list_;
};

TEST_F(DiskCacheSimplePrefetchTest, NoPrefetch) {
  base::HistogramTester histogram_tester;
  SetupFullPrefetch(0);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ false);

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_NONE, 1);
}

TEST_F(DiskCacheSimplePrefetchTest, YesPrefetch) {
  base::HistogramTester histogram_tester;
  SetupFullPrefetch(2 * kEntrySize);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ true);

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_FULL, 1);
}

TEST_F(DiskCacheSimplePrefetchTest, YesPrefetchNoRead) {
  base::HistogramTester histogram_tester;
  SetupFullPrefetch(2 * kEntrySize);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(OpenEntry(kKey, &entry), IsOk());
  entry->Close();

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_FULL, 1);
}

// This makes sure we detect a checksum error on an entry that's small enough
// to be prefetched. This is like DiskCacheEntryTest.BadChecksum, but we make
// sure to configure prefetch explicitly.
TEST_F(DiskCacheSimplePrefetchTest, BadChecksumSmall) {
  SetupFullPrefetch(1024);  // Bigger than the entry created below.
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, 10));

  disk_cache::Entry* entry = nullptr;

  // Open the entry. Since we made a small entry, we will detect the CRC
  // problem at open.
  EXPECT_THAT(OpenEntry(key, &entry), IsError(net::ERR_FAILED));
}

TEST_F(DiskCacheSimplePrefetchTest, ChecksumNoPrefetch) {
  base::HistogramTester histogram_tester;
  SetupFullPrefetch(0);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ false);

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncCheckEOFResult",
                                      disk_cache::CHECK_EOF_RESULT_SUCCESS, 2);
}

TEST_F(DiskCacheSimplePrefetchTest, NoChecksumNoPrefetch) {
  base::HistogramTester histogram_tester;
  SetupFullPrefetch(0);

  const char kKey[] = "a key";
  InitCacheAndCreateEntryWithNoCrc(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ false);

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncCheckEOFResult",
                                      disk_cache::CHECK_EOF_RESULT_SUCCESS, 2);
}

TEST_F(DiskCacheSimplePrefetchTest, ChecksumPrefetch) {
  base::HistogramTester histogram_tester;
  SetupFullPrefetch(2 * kEntrySize);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ true);

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncCheckEOFResult",
                                      disk_cache::CHECK_EOF_RESULT_SUCCESS, 2);
}

TEST_F(DiskCacheSimplePrefetchTest, NoChecksumPrefetch) {
  base::HistogramTester histogram_tester;
  SetupFullPrefetch(2 * kEntrySize);

  const char kKey[] = "a key";
  InitCacheAndCreateEntryWithNoCrc(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ true);

  // EOF check is recorded even if there is no CRC there.
  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncCheckEOFResult",
                                      disk_cache::CHECK_EOF_RESULT_SUCCESS, 2);
}

TEST_F(DiskCacheSimplePrefetchTest, PrefetchReadsSync) {
  // Make sure we can read things synchronously after prefetch.
  SetupFullPrefetch(32768);  // Way bigger than kEntrySize.
  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(OpenEntry(kKey, &entry), IsOk());
  scoped_refptr<net::IOBuffer> read_buf =
      base::MakeRefCounted<net::IOBuffer>(kEntrySize);

  // That this is entry->ReadData(...) rather than ReadData(entry, ...) is
  // meaningful here, as the latter is a helper in the test fixture that
  // blocks if needed.
  EXPECT_EQ(kEntrySize, entry->ReadData(1, 0, read_buf.get(), kEntrySize,
                                        net::CompletionOnceCallback()));
  EXPECT_EQ(0, memcmp(read_buf->data(), payload_->data(), kEntrySize));
  entry->Close();
}

TEST_F(DiskCacheSimplePrefetchTest, NoFullNoSpeculative) {
  base::HistogramTester histogram_tester;
  SetupFullAndTrailerPrefetch(0, 0);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ false);

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_NONE, 1);
}

TEST_F(DiskCacheSimplePrefetchTest, NoFullSmallSpeculative) {
  base::HistogramTester histogram_tester;
  SetupFullAndTrailerPrefetch(0, kEntrySize / 2);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ false);

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_TRAILER, 1);
}

TEST_F(DiskCacheSimplePrefetchTest, NoFullLargeSpeculative) {
  base::HistogramTester histogram_tester;
  // A large speculative trailer prefetch that exceeds the entry file
  // size should effectively trigger full prefetch behavior.
  SetupFullAndTrailerPrefetch(0, kEntrySize * 2);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ true);

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_FULL, 1);
}

TEST_F(DiskCacheSimplePrefetchTest, SmallFullNoSpeculative) {
  base::HistogramTester histogram_tester;
  SetupFullAndTrailerPrefetch(kEntrySize / 2, 0);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ false);

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_NONE, 1);
}

TEST_F(DiskCacheSimplePrefetchTest, LargeFullNoSpeculative) {
  base::HistogramTester histogram_tester;
  SetupFullAndTrailerPrefetch(kEntrySize * 2, 0);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ true);

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_FULL, 1);
}

TEST_F(DiskCacheSimplePrefetchTest, SmallFullSmallSpeculative) {
  base::HistogramTester histogram_tester;
  SetupFullAndTrailerPrefetch(kEntrySize / 2, kEntrySize / 2);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ false);

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_TRAILER, 1);
}

TEST_F(DiskCacheSimplePrefetchTest, LargeFullSmallSpeculative) {
  base::HistogramTester histogram_tester;
  // Full prefetch takes precedence over a trailer speculative prefetch.
  SetupFullAndTrailerPrefetch(kEntrySize * 2, kEntrySize / 2);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ true);

  histogram_tester.ExpectUniqueSample("SimpleCache.Http.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_FULL, 1);
}

class DiskCacheSimpleAppCachePrefetchTest
    : public DiskCacheSimplePrefetchTest {
 public:
  // APP_CACHE mode will enable trailer prefetch hint support.
  net::CacheType SimpleCacheType() const override { return net::APP_CACHE; }
};
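
// With the trailer hint available in APP_CACHE mode, trailer prefetch no
// longer depends on the speculative-size parameter: the tests below expect
// OPEN_PREFETCH_TRAILER even when that parameter is zero.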

TEST_F(DiskCacheSimpleAppCachePrefetchTest, NoFullNoSpeculative) {
  base::HistogramTester histogram_tester;
  SetupFullAndTrailerPrefetch(0, 0);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ false);

  histogram_tester.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_TRAILER, 1);
}

TEST_F(DiskCacheSimpleAppCachePrefetchTest, NoFullSmallSpeculative) {
  base::HistogramTester histogram_tester;
  SetupFullAndTrailerPrefetch(0, kEntrySize / 2);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ false);

  histogram_tester.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_TRAILER, 1);
}

TEST_F(DiskCacheSimpleAppCachePrefetchTest, NoFullLargeSpeculative) {
  base::HistogramTester histogram_tester;
  // Even though the speculative trailer prefetch size is larger than the
  // file size, the hint should take precedence and still perform a limited
  // trailer prefetch.
  SetupFullAndTrailerPrefetch(0, kEntrySize * 2);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ false);

  histogram_tester.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_TRAILER, 1);
}

TEST_F(DiskCacheSimpleAppCachePrefetchTest, SmallFullNoSpeculative) {
  base::HistogramTester histogram_tester;
  SetupFullAndTrailerPrefetch(kEntrySize / 2, 0);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ false);

  histogram_tester.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_TRAILER, 1);
}

TEST_F(DiskCacheSimpleAppCachePrefetchTest, LargeFullNoSpeculative) {
  base::HistogramTester histogram_tester;
  // Full prefetch takes precedence over a trailer hint prefetch.
  SetupFullAndTrailerPrefetch(kEntrySize * 2, 0);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ true);

  histogram_tester.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_FULL, 1);
}

TEST_F(DiskCacheSimpleAppCachePrefetchTest, SmallFullSmallSpeculative) {
  base::HistogramTester histogram_tester;
  SetupFullAndTrailerPrefetch(kEntrySize / 2, kEntrySize / 2);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ false);

  histogram_tester.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_TRAILER, 1);
}

TEST_F(DiskCacheSimpleAppCachePrefetchTest, LargeFullSmallSpeculative) {
  base::HistogramTester histogram_tester;
  // Full prefetch takes precedence over a trailer speculative prefetch.
  SetupFullAndTrailerPrefetch(kEntrySize * 2, kEntrySize / 2);

  const char kKey[] = "a key";
  InitCacheAndCreateEntry(kKey);
  TryRead(kKey, /* expect_preread_stream1 */ true);

  histogram_tester.ExpectUniqueSample("SimpleCache.App.SyncOpenPrefetchMode",
                                      disk_cache::OPEN_PREFETCH_FULL, 1);
}