// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS))
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}

static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode))
		return true;

	if (f2fs_is_compressed_page(page))
		return false;
	if ((S_ISREG(inode->i_mode) &&
			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
			page_private_gcing(page))
		return true;
	return false;
}

static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= 1 << 0,
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= 1 << 1,
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= 1 << 2,
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
	unsigned int enabled_steps;
};

static void f2fs_finish_read_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	/*
	 * Update and unlock the bio's pagecache pages, and put the
	 * decompression context for any compressed pages.
	 */
	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			if (bio->bi_status)
				f2fs_end_read_compressed_page(page, true, 0);
			f2fs_put_page_dic(page);
			continue;
		}

		/* PG_error was set if decryption or verity failed. */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readpages() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first. This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity. Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !PageError(page) && !fsverity_verify_page(page))
				SetPageError(page);
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio. Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue. This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio);
	}
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page. The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;
	block_t blkaddr = SECTOR_TO_BLOCK(ctx->bio->bi_iter.bi_sector);

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		/* PG_error was set if decryption failed. */
		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, PageError(page),
						blkaddr);
		else
			all_compressed = false;

		blkaddr++;
	}

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}

static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	if (ctx->enabled_steps & STEP_DECRYPT)
		fscrypt_decrypt_bio(ctx->bio);

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx);

	f2fs_verify_and_finish_bio(ctx->bio);
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO)) {
		f2fs_show_injection_info(sbi, FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio);
		return;
	}

	if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
		INIT_WORK(&ctx->work, f2fs_post_read_work);
		queue_work(ctx->sbi->post_read_wq, &ctx->work);
	} else {
		f2fs_verify_and_finish_bio(bio);
	}
}
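
/*
 * Summary of the read post-processing pipeline implemented above, for
 * orientation only (a sketch of the control flow, not an additional code
 * path):
 *
 *   f2fs_read_end_io()
 *     -> queue f2fs_post_read_work() on sbi->post_read_wq when the bio
 *        needs STEP_DECRYPT and/or STEP_DECOMPRESS
 *          -> fscrypt_decrypt_bio()            (STEP_DECRYPT)
 *          -> f2fs_handle_step_decompress()    (STEP_DECOMPRESS)
 *     -> f2fs_verify_and_finish_bio()
 *          -> fs-verity work queue             (STEP_VERITY), then
 *          -> f2fs_finish_read_bio() unlocks the pages and frees the ctx
 */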

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (page_private_dummy(page)) {
			clear_page_private_dummy(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_page_private_gcing(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}
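
/*
 * Example of the multi-device translation done above, using hypothetical
 * device ranges: if FDEV(1) covers start_blk 0x10000..end_blk 0x1ffff and
 * blk_addr is 0x10800, the bio is directed at FDEV(1).bdev and its sector is
 * derived from the device-relative block 0x800 (blk_addr - start_blk).
 * On a single-device filesystem the address is used unchanged on sb->s_bdev.
 */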

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

/*
 * Return true if pre_bio's bdev is the same as its target device.
 */
static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);

	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}

static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);

	f2fs_target_device(sbi, fio->new_blkaddr, bio);
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
						fio->type, fio->temp);
	}
	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
	else if (fscrypt_inode_should_skip_dm_default_key(inode))
		bio_set_skip_dm_default_key(bio);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio) &&
			(bio_should_skip_dm_default_key(bio) ==
			 fscrypt_inode_should_skip_dm_default_key(inode));

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}

static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_lfs_mode(sbi) && current->plug)
			blk_finish_plug(current->plug);

		if (!F2FS_IO_ALIGNED(sbi))
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					      GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			lock_page(page);

			zero_user_segment(page, 0, PAGE_SIZE);
			set_page_private_dummy(page);

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose next block address chain. So, we
		 * need to do checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}
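
/*
 * Worked example of the dummy-page padding above, assuming a hypothetical
 * IO-aligned configuration where F2FS_IO_SIZE(sbi) is 8 blocks: a write bio
 * currently holding 5 pages gives start = 5, so 3 dummy pages are allocated
 * from sbi->write_io_dummy, zeroed, and appended so the bio covers a whole
 * IO unit before submit_bio().
 */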

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	__submit_bio(sbi, bio, type);
}

static void __attach_io_flag(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
	unsigned int io_flag, fua_flag, meta_flag;

	if (fio->type == DATA)
		io_flag = sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = sbi->node_io_flag;
	else
		return;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
	if ((1 << fio->temp) & meta_flag)
		fio->op_flags |= REQ_META;
	if ((1 << fio->temp) & fua_flag)
		fio->op_flags |= REQ_FUA;
}
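
/*
 * Worked example of the bit layout documented above, using a hypothetical
 * sbi->data_io_flag of 0x09 (binary 001001): fua_flag = 0x09 & 0x7 = 0x1 and
 * meta_flag = (0x09 >> 3) & 0x7 = 0x1, so a DATA fio with fio->temp == HOT
 * (bit 0) gets both REQ_FUA and REQ_META attached, while WARM and COLD fios
 * are left untouched.
 */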

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	__attach_io_flag(fio);
	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *target = bvec->bv_page;

		if (fscrypt_is_bounce_page(target)) {
			target = fscrypt_pagecache_page(target);
			if (IS_ERR(target))
				continue;
		}
		if (f2fs_is_compressed_page(target)) {
			target = f2fs_compress_control_page(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	f2fs_down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	f2fs_up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			f2fs_down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			f2fs_up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE)))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);

	f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
			       fio->page->index, fio, GFP_NOIO);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	__attach_io_flag(fio);
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}

static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
				block_t last_blkaddr, block_t cur_blkaddr)
{
	if (unlikely(sbi->max_io_bytes &&
			bio->bi_iter.bi_size >= sbi->max_io_bytes))
		return false;
	if (last_blkaddr + 1 != cur_blkaddr)
		return false;
	return __same_bdev(sbi, cur_blkaddr, bio);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
						struct f2fs_io_info *fio)
{
	if (io->fio.op != fio->op)
		return false;
	return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
					struct f2fs_bio_info *io,
					struct f2fs_io_info *fio,
					block_t last_blkaddr,
					block_t cur_blkaddr)
{
	if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
		unsigned int filled_blocks =
				F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
		unsigned int io_size = F2FS_IO_SIZE(sbi);
		unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;

		/* IOs in the bio are aligned and the remaining vector space is not enough */
		if (!(filled_blocks % io_size) && left_vecs < io_size)
			return false;
	}
	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
		return false;
	return io_type_is_mergeable(io, fio);
}
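
/*
 * Taken together, the helpers above let a page join an existing write bio only
 * when (a) the bio has not reached sbi->max_io_bytes, (b) the new block is
 * physically contiguous with the bio's last block on the same bdev, (c) op and
 * op_flags match the in-flight f2fs_bio_info, and (d) for IO-aligned setups,
 * the bio is not sitting at an aligned boundary with too few bvec slots left
 * for another full IO unit (a summary of the checks above).
 */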

static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
				struct page *page, enum temp_type temp)
{
	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
	struct bio_entry *be;

	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
	be->bio = bio;
	bio_get(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
		f2fs_bug_on(sbi, 1);

	f2fs_down_write(&io->bio_list_lock);
	list_add_tail(&be->list, &io->bio_list);
	f2fs_up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
	list_del(&be->list);
	kmem_cache_free(bio_entry_slab, be);
}

static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
							struct page *page)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum temp_type temp;
	bool found = false;
	int ret = -EAGAIN;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		f2fs_down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (be->bio != *bio)
				continue;

			found = true;

			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
							    *fio->last_block,
							    fio->new_blkaddr));
			if (f2fs_crypt_mergeable_bio(*bio,
					fio->page->mapping->host,
					fio->page->index, fio) &&
			    bio_add_page(*bio, page, PAGE_SIZE, 0) ==
					PAGE_SIZE) {
				ret = 0;
				break;
			}

			/* page can't be merged into bio; submit the bio */
			del_bio_entry(be);
			__submit_bio(sbi, *bio, DATA);
			break;
		}
		f2fs_up_write(&io->bio_list_lock);
	}

	if (ret) {
		bio_put(*bio);
		*bio = NULL;
	}

	return ret;
}

void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page)
{
	enum temp_type temp;
	bool found = false;
	struct bio *target = bio ? *bio : NULL;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		if (list_empty(head))
			continue;

		f2fs_down_read(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found)
				break;
		}
		f2fs_up_read(&io->bio_list_lock);

		if (!found)
			continue;

		found = false;

		f2fs_down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found) {
				target = be->bio;
				del_bio_entry(be);
				break;
			}
		}
		f2fs_up_write(&io->bio_list_lock);
	}

	if (found)
		__submit_bio(sbi, target, DATA);
	if (bio && *bio) {
		bio_put(*bio);
		*bio = NULL;
	}
}

int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio = *fio->bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);

	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
						fio->new_blkaddr))
		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
	if (!bio) {
		bio = __bio_alloc(fio, BIO_MAX_PAGES);
		__attach_io_flag(fio);
		f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
				       fio->page->index, fio, GFP_NOIO);
		bio_set_op_attrs(bio, fio->op, fio->op_flags);

		add_bio_entry(fio->sbi, bio, page, fio->temp);
	} else {
		if (add_ipu_page(fio, &bio, page))
			goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	inc_page_count(fio->sbi, WB_DATA_TYPE(page));

	*fio->last_block = fio->new_blkaddr;
	*fio->bio = bio;

	return 0;
}
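
/*
 * Note on the in-place-update (IPU) write path above: bios built by
 * f2fs_merge_page_bio() are kept on the per-temperature io->bio_list through
 * add_bio_entry()/add_ipu_page(), so a later caller can either merge more
 * pages into them (add_ipu_page()) or locate and flush them
 * (f2fs_submit_merged_ipu_write()). This is a summary of the helpers above,
 * not an extra code path.
 */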

void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	f2fs_down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	verify_fio_blkaddr(fio);

	if (fio->encrypted_page)
		bio_page = fio->encrypted_page;
	else if (fio->compressed_page)
		bio_page = fio->compressed_page;
	else
		bio_page = fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio &&
	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
			      fio->new_blkaddr) ||
	     !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
				       bio_page->index, fio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if (F2FS_IO_ALIGNED(sbi) &&
				(fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			fio->retry = true;
			goto skip;
		}
		io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
		f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
				       bio_page->index, fio, GFP_NOIO);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;

	trace_f2fs_submit_page_write(fio->page, fio);
skip:
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				!f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	f2fs_up_write(&io->io_rwsem);
}

static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
				      unsigned nr_pages, unsigned op_flag,
				      pgoff_t first_idx, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx;
	unsigned int post_read_steps = 0;

	bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
			       min_t(int, nr_pages, BIO_MAX_PAGES),
			       &f2fs_bioset);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);

	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= STEP_DECRYPT;

	if (f2fs_need_verity(inode, first_idx))
		post_read_steps |= STEP_VERITY;

	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters. We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */
	if (post_read_steps || f2fs_compressed_file(inode)) {
		/* Due to the mempool, this never fails. */
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		ctx->bio = bio;
		ctx->sbi = sbi;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}

	return bio;
}

/* This can handle encryption stuff */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
				 block_t blkaddr, int op_flags, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
					page->index, for_write);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	ClearPageError(page);
	inc_page_count(sbi, F2FS_RD_DATA);
	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	__submit_bio(sbi, bio, DATA);
	return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	f2fs_set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = f2fs_data_blkaddr(dn);

		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0, 0, 0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}
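
/*
 * Example of the extent-cache shortcut above, with hypothetical values: if
 * the cached extent is { fofs = 100, blk = 5000, len = 8 } and index is 103,
 * the lookup hits and dn->data_blkaddr becomes 5000 + 103 - 100 = 5003
 * without touching the dnode; indices outside [100, 107] fall back to
 * f2fs_reserve_block().
 */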
  968. struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
  969. int op_flags, bool for_write)
  970. {
  971. struct address_space *mapping = inode->i_mapping;
  972. struct dnode_of_data dn;
  973. struct page *page;
  974. struct extent_info ei = {0,0,0};
  975. int err;
  976. page = f2fs_grab_cache_page(mapping, index, for_write);
  977. if (!page)
  978. return ERR_PTR(-ENOMEM);
  979. if (f2fs_lookup_extent_cache(inode, index, &ei)) {
  980. dn.data_blkaddr = ei.blk + index - ei.fofs;
  981. if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
  982. DATA_GENERIC_ENHANCE_READ)) {
  983. err = -EFSCORRUPTED;
  984. goto put_err;
  985. }
  986. goto got_it;
  987. }
  988. set_new_dnode(&dn, inode, NULL, NULL, 0);
  989. err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
  990. if (err)
  991. goto put_err;
  992. f2fs_put_dnode(&dn);
  993. if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
  994. err = -ENOENT;
  995. goto put_err;
  996. }
  997. if (dn.data_blkaddr != NEW_ADDR &&
  998. !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
  999. dn.data_blkaddr,
  1000. DATA_GENERIC_ENHANCE)) {
  1001. err = -EFSCORRUPTED;
  1002. goto put_err;
  1003. }
  1004. got_it:
  1005. if (PageUptodate(page)) {
  1006. unlock_page(page);
  1007. return page;
  1008. }
  1009. /*
  1010. * A new dentry page is allocated but not able to be written, since its
  1011. * new inode page couldn't be allocated due to -ENOSPC.
  1012. * In such the case, its blkaddr can be remained as NEW_ADDR.
  1013. * see, f2fs_add_link -> f2fs_get_new_data_page ->
  1014. * f2fs_init_inode_metadata.
  1015. */
  1016. if (dn.data_blkaddr == NEW_ADDR) {
  1017. zero_user_segment(page, 0, PAGE_SIZE);
  1018. if (!PageUptodate(page))
  1019. SetPageUptodate(page);
  1020. unlock_page(page);
  1021. return page;
  1022. }
  1023. err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
  1024. op_flags, for_write);
  1025. if (err)
  1026. goto put_err;
  1027. return page;
  1028. put_err:
  1029. f2fs_put_page(page, 1);
  1030. return ERR_PTR(err);
  1031. }
  1032. struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
  1033. {
  1034. struct address_space *mapping = inode->i_mapping;
  1035. struct page *page;
  1036. page = find_get_page(mapping, index);
  1037. if (page && PageUptodate(page))
  1038. return page;
  1039. f2fs_put_page(page, 0);
  1040. page = f2fs_get_read_data_page(inode, index, 0, false);
  1041. if (IS_ERR(page))
  1042. return page;
  1043. if (PageUptodate(page))
  1044. return page;
  1045. wait_on_page_locked(page);
  1046. if (unlikely(!PageUptodate(page))) {
  1047. f2fs_put_page(page, 0);
  1048. return ERR_PTR(-EIO);
  1049. }
  1050. return page;
  1051. }
  1052. /*
  1053. * If it tries to access a hole, return an error.
  1054. * Because, the callers, functions in dir.c and GC, should be able to know
  1055. * whether this page exists or not.
  1056. */
  1057. struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
  1058. bool for_write)
  1059. {
  1060. struct address_space *mapping = inode->i_mapping;
  1061. struct page *page;
  1062. repeat:
  1063. page = f2fs_get_read_data_page(inode, index, 0, for_write);
  1064. if (IS_ERR(page))
  1065. return page;
  1066. /* wait for read completion */
  1067. lock_page(page);
  1068. if (unlikely(page->mapping != mapping)) {
  1069. f2fs_put_page(page, 1);
  1070. goto repeat;
  1071. }
  1072. if (unlikely(!PageUptodate(page))) {
  1073. f2fs_put_page(page, 1);
  1074. return ERR_PTR(-EIO);
  1075. }
  1076. return page;
  1077. }
  1078. /*
  1079. * Caller ensures that this data page is never allocated.
  1080. * A new zero-filled data page is allocated in the page cache.
  1081. *
  1082. * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
  1083. * f2fs_unlock_op().
  1084. * Note that, ipage is set only by make_empty_dir, and if any error occur,
  1085. * ipage should be released by this function.
  1086. */
  1087. struct page *f2fs_get_new_data_page(struct inode *inode,
  1088. struct page *ipage, pgoff_t index, bool new_i_size)
  1089. {
  1090. struct address_space *mapping = inode->i_mapping;
  1091. struct page *page;
  1092. struct dnode_of_data dn;
  1093. int err;
  1094. page = f2fs_grab_cache_page(mapping, index, true);
  1095. if (!page) {
  1096. /*
  1097. * before exiting, we should make sure ipage will be released
  1098. * if any error occur.
  1099. */
  1100. f2fs_put_page(ipage, 1);
  1101. return ERR_PTR(-ENOMEM);
  1102. }
  1103. set_new_dnode(&dn, inode, ipage, NULL, 0);
  1104. err = f2fs_reserve_block(&dn, index);
  1105. if (err) {
  1106. f2fs_put_page(page, 1);
  1107. return ERR_PTR(err);
  1108. }
  1109. if (!ipage)
  1110. f2fs_put_dnode(&dn);
  1111. if (PageUptodate(page))
  1112. goto got_it;
  1113. if (dn.data_blkaddr == NEW_ADDR) {
  1114. zero_user_segment(page, 0, PAGE_SIZE);
  1115. if (!PageUptodate(page))
  1116. SetPageUptodate(page);
  1117. } else {
  1118. f2fs_put_page(page, 1);
  1119. /* if ipage exists, blkaddr should be NEW_ADDR */
  1120. f2fs_bug_on(F2FS_I_SB(inode), ipage);
  1121. page = f2fs_get_lock_data_page(inode, index, true);
  1122. if (IS_ERR(page))
  1123. return page;
  1124. }
  1125. got_it:
  1126. if (new_i_size && i_size_read(inode) <
  1127. ((loff_t)(index + 1) << PAGE_SHIFT))
  1128. f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
  1129. return page;
  1130. }
  1131. static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
  1132. {
  1133. struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
  1134. struct f2fs_summary sum;
  1135. struct node_info ni;
  1136. block_t old_blkaddr;
  1137. blkcnt_t count = 1;
  1138. int err;
  1139. if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
  1140. return -EPERM;
  1141. err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
  1142. if (err)
  1143. return err;
  1144. dn->data_blkaddr = f2fs_data_blkaddr(dn);
  1145. if (dn->data_blkaddr != NULL_ADDR)
  1146. goto alloc;
  1147. if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
  1148. return err;
  1149. alloc:
  1150. set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
  1151. old_blkaddr = dn->data_blkaddr;
  1152. f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
  1153. &sum, seg_type, NULL);
  1154. if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
  1155. invalidate_mapping_pages(META_MAPPING(sbi),
  1156. old_blkaddr, old_blkaddr);
  1157. f2fs_invalidate_compress_page(sbi, old_blkaddr);
  1158. }
  1159. f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
  1160. /*
  1161. * i_size will be updated by direct_IO. Otherwise, we'll get stale
  1162. * data from unwritten block via dio_read.
  1163. */
  1164. return 0;
  1165. }
  1166. int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
  1167. {
  1168. struct inode *inode = file_inode(iocb->ki_filp);
  1169. struct f2fs_map_blocks map;
  1170. int flag;
  1171. int err = 0;
  1172. bool direct_io = iocb->ki_flags & IOCB_DIRECT;
  1173. map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
  1174. map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
  1175. if (map.m_len > map.m_lblk)
  1176. map.m_len -= map.m_lblk;
  1177. else
  1178. map.m_len = 0;
  1179. map.m_next_pgofs = NULL;
  1180. map.m_next_extent = NULL;
  1181. map.m_seg_type = NO_CHECK_TYPE;
  1182. map.m_may_create = true;
  1183. if (direct_io) {
  1184. map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
  1185. flag = f2fs_force_buffered_io(inode, iocb, from) ?
  1186. F2FS_GET_BLOCK_PRE_AIO :
  1187. F2FS_GET_BLOCK_PRE_DIO;
  1188. goto map_blocks;
  1189. }
  1190. if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
  1191. err = f2fs_convert_inline_inode(inode);
  1192. if (err)
  1193. return err;
  1194. }
  1195. if (f2fs_has_inline_data(inode))
  1196. return err;
  1197. flag = F2FS_GET_BLOCK_PRE_AIO;
  1198. map_blocks:
  1199. err = f2fs_map_blocks(inode, &map, 1, flag);
  1200. if (map.m_len > 0 && err == -ENOSPC) {
  1201. if (!direct_io)
  1202. set_inode_flag(inode, FI_NO_PREALLOC);
  1203. err = 0;
  1204. }
  1205. return err;
  1206. }
  1207. void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
  1208. {
  1209. if (flag == F2FS_GET_BLOCK_PRE_AIO) {
  1210. if (lock)
  1211. f2fs_down_read(&sbi->node_change);
  1212. else
  1213. f2fs_up_read(&sbi->node_change);
  1214. } else {
  1215. if (lock)
  1216. f2fs_lock_op(sbi);
  1217. else
  1218. f2fs_unlock_op(sbi);
  1219. }
  1220. }
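/*
 * Example (illustrative sketch): f2fs_do_map_lock() is used in matched
 * lock/unlock pairs with the same flag, exactly as f2fs_map_blocks() below
 * does around block lookup/allocation.
 */
#if 0
	if (map->m_may_create)
		f2fs_do_map_lock(sbi, flag, true);	/* lock */

	/* ... look up or allocate blocks under the lock ... */

	if (map->m_may_create)
		f2fs_do_map_lock(sbi, flag, false);	/* unlock */
#endif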
  1221. /*
1222. * f2fs_map_blocks() tries to find or build a mapping relationship that
1223. * maps contiguous logical blocks to physical blocks, and returns such
1224. * info via the f2fs_map_blocks structure.
  1225. */
  1226. int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
  1227. int create, int flag)
  1228. {
  1229. unsigned int maxblocks = map->m_len;
  1230. struct dnode_of_data dn;
  1231. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  1232. int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
  1233. pgoff_t pgofs, end_offset, end;
  1234. int err = 0, ofs = 1;
  1235. unsigned int ofs_in_node, last_ofs_in_node;
  1236. blkcnt_t prealloc;
  1237. struct extent_info ei = {0,0,0};
  1238. block_t blkaddr;
  1239. unsigned int start_pgofs;
  1240. if (!maxblocks)
  1241. return 0;
  1242. map->m_len = 0;
  1243. map->m_flags = 0;
  1244. /* it only supports block size == page size */
  1245. pgofs = (pgoff_t)map->m_lblk;
  1246. end = pgofs + maxblocks;
  1247. if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
  1248. if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
  1249. map->m_may_create)
  1250. goto next_dnode;
  1251. map->m_pblk = ei.blk + pgofs - ei.fofs;
  1252. map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
  1253. map->m_flags = F2FS_MAP_MAPPED;
  1254. if (map->m_next_extent)
  1255. *map->m_next_extent = pgofs + map->m_len;
1256. /* for hardware encryption, but also to avoid potential issues in the future */
  1257. if (flag == F2FS_GET_BLOCK_DIO)
  1258. f2fs_wait_on_block_writeback_range(inode,
  1259. map->m_pblk, map->m_len);
  1260. goto out;
  1261. }
  1262. next_dnode:
  1263. if (map->m_may_create)
  1264. f2fs_do_map_lock(sbi, flag, true);
1265. /* When reading holes, we need the corresponding node page */
  1266. set_new_dnode(&dn, inode, NULL, NULL, 0);
  1267. err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
  1268. if (err) {
  1269. if (flag == F2FS_GET_BLOCK_BMAP)
  1270. map->m_pblk = 0;
  1271. if (err == -ENOENT) {
  1272. /*
1273. * There is one exceptional case in which read_node_page()
1274. * may return -ENOENT because the filesystem has been
1275. * shut down or hit cp_error, so force the error
1276. * number to EIO in that case.
  1277. */
  1278. if (map->m_may_create &&
  1279. (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
  1280. f2fs_cp_error(sbi))) {
  1281. err = -EIO;
  1282. goto unlock_out;
  1283. }
  1284. err = 0;
  1285. if (map->m_next_pgofs)
  1286. *map->m_next_pgofs =
  1287. f2fs_get_next_page_offset(&dn, pgofs);
  1288. if (map->m_next_extent)
  1289. *map->m_next_extent =
  1290. f2fs_get_next_page_offset(&dn, pgofs);
  1291. }
  1292. goto unlock_out;
  1293. }
  1294. start_pgofs = pgofs;
  1295. prealloc = 0;
  1296. last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
  1297. end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
  1298. next_block:
  1299. blkaddr = f2fs_data_blkaddr(&dn);
  1300. if (__is_valid_data_blkaddr(blkaddr) &&
  1301. !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
  1302. err = -EFSCORRUPTED;
  1303. goto sync_out;
  1304. }
  1305. if (__is_valid_data_blkaddr(blkaddr)) {
1306. /* use out-of-place update for direct IO under LFS mode */
  1307. if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
  1308. map->m_may_create) {
  1309. err = __allocate_data_block(&dn, map->m_seg_type);
  1310. if (err)
  1311. goto sync_out;
  1312. blkaddr = dn.data_blkaddr;
  1313. set_inode_flag(inode, FI_APPEND_WRITE);
  1314. }
  1315. } else {
  1316. if (create) {
  1317. if (unlikely(f2fs_cp_error(sbi))) {
  1318. err = -EIO;
  1319. goto sync_out;
  1320. }
  1321. if (flag == F2FS_GET_BLOCK_PRE_AIO) {
  1322. if (blkaddr == NULL_ADDR) {
  1323. prealloc++;
  1324. last_ofs_in_node = dn.ofs_in_node;
  1325. }
  1326. } else {
  1327. WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
  1328. flag != F2FS_GET_BLOCK_DIO);
  1329. err = __allocate_data_block(&dn,
  1330. map->m_seg_type);
  1331. if (!err)
  1332. set_inode_flag(inode, FI_APPEND_WRITE);
  1333. }
  1334. if (err)
  1335. goto sync_out;
  1336. map->m_flags |= F2FS_MAP_NEW;
  1337. blkaddr = dn.data_blkaddr;
  1338. } else {
  1339. if (flag == F2FS_GET_BLOCK_BMAP) {
  1340. map->m_pblk = 0;
  1341. goto sync_out;
  1342. }
  1343. if (flag == F2FS_GET_BLOCK_PRECACHE)
  1344. goto sync_out;
  1345. if (flag == F2FS_GET_BLOCK_FIEMAP &&
  1346. blkaddr == NULL_ADDR) {
  1347. if (map->m_next_pgofs)
  1348. *map->m_next_pgofs = pgofs + 1;
  1349. goto sync_out;
  1350. }
  1351. if (flag != F2FS_GET_BLOCK_FIEMAP) {
  1352. /* for defragment case */
  1353. if (map->m_next_pgofs)
  1354. *map->m_next_pgofs = pgofs + 1;
  1355. goto sync_out;
  1356. }
  1357. }
  1358. }
  1359. if (flag == F2FS_GET_BLOCK_PRE_AIO)
  1360. goto skip;
  1361. if (map->m_len == 0) {
  1362. /* preallocated unwritten block should be mapped for fiemap. */
  1363. if (blkaddr == NEW_ADDR)
  1364. map->m_flags |= F2FS_MAP_UNWRITTEN;
  1365. map->m_flags |= F2FS_MAP_MAPPED;
  1366. map->m_pblk = blkaddr;
  1367. map->m_len = 1;
  1368. } else if ((map->m_pblk != NEW_ADDR &&
  1369. blkaddr == (map->m_pblk + ofs)) ||
  1370. (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
  1371. flag == F2FS_GET_BLOCK_PRE_DIO) {
  1372. ofs++;
  1373. map->m_len++;
  1374. } else {
  1375. goto sync_out;
  1376. }
  1377. skip:
  1378. dn.ofs_in_node++;
  1379. pgofs++;
  1380. /* preallocate blocks in batch for one dnode page */
  1381. if (flag == F2FS_GET_BLOCK_PRE_AIO &&
  1382. (pgofs == end || dn.ofs_in_node == end_offset)) {
  1383. dn.ofs_in_node = ofs_in_node;
  1384. err = f2fs_reserve_new_blocks(&dn, prealloc);
  1385. if (err)
  1386. goto sync_out;
  1387. map->m_len += dn.ofs_in_node - ofs_in_node;
  1388. if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
  1389. err = -ENOSPC;
  1390. goto sync_out;
  1391. }
  1392. dn.ofs_in_node = end_offset;
  1393. }
  1394. if (pgofs >= end)
  1395. goto sync_out;
  1396. else if (dn.ofs_in_node < end_offset)
  1397. goto next_block;
  1398. if (flag == F2FS_GET_BLOCK_PRECACHE) {
  1399. if (map->m_flags & F2FS_MAP_MAPPED) {
  1400. unsigned int ofs = start_pgofs - map->m_lblk;
  1401. f2fs_update_extent_cache_range(&dn,
  1402. start_pgofs, map->m_pblk + ofs,
  1403. map->m_len - ofs);
  1404. }
  1405. }
  1406. f2fs_put_dnode(&dn);
  1407. if (map->m_may_create) {
  1408. f2fs_do_map_lock(sbi, flag, false);
  1409. f2fs_balance_fs(sbi, dn.node_changed);
  1410. }
  1411. goto next_dnode;
  1412. sync_out:
1413. /* for hardware encryption, but also to avoid potential issues in the future */
  1414. if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
  1415. f2fs_wait_on_block_writeback_range(inode,
  1416. map->m_pblk, map->m_len);
  1417. if (flag == F2FS_GET_BLOCK_PRECACHE) {
  1418. if (map->m_flags & F2FS_MAP_MAPPED) {
  1419. unsigned int ofs = start_pgofs - map->m_lblk;
  1420. f2fs_update_extent_cache_range(&dn,
  1421. start_pgofs, map->m_pblk + ofs,
  1422. map->m_len - ofs);
  1423. }
  1424. if (map->m_next_extent)
  1425. *map->m_next_extent = pgofs + 1;
  1426. }
  1427. f2fs_put_dnode(&dn);
  1428. unlock_out:
  1429. if (map->m_may_create) {
  1430. f2fs_do_map_lock(sbi, flag, false);
  1431. f2fs_balance_fs(sbi, dn.node_changed);
  1432. }
  1433. out:
  1434. trace_f2fs_map_blocks(inode, map, err);
  1435. return err;
  1436. }
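/*
 * Example (illustrative sketch): a read-only, single-block lookup through
 * f2fs_map_blocks(), in the same style as f2fs_overwrite_io() below. The
 * helper name example_block_is_mapped() is hypothetical.
 */
#if 0
static bool example_block_is_mapped(struct inode *inode, pgoff_t lblk)
{
	struct f2fs_map_blocks map;

	memset(&map, 0, sizeof(map));
	map.m_lblk = lblk;
	map.m_len = 1;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;	/* lookup only, never allocate */

	if (f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT))
		return false;

	return map.m_flags & F2FS_MAP_MAPPED;
}
#endif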
  1437. bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
  1438. {
  1439. struct f2fs_map_blocks map;
  1440. block_t last_lblk;
  1441. int err;
  1442. if (pos + len > i_size_read(inode))
  1443. return false;
  1444. map.m_lblk = F2FS_BYTES_TO_BLK(pos);
  1445. map.m_next_pgofs = NULL;
  1446. map.m_next_extent = NULL;
  1447. map.m_seg_type = NO_CHECK_TYPE;
  1448. map.m_may_create = false;
  1449. last_lblk = F2FS_BLK_ALIGN(pos + len);
  1450. while (map.m_lblk < last_lblk) {
  1451. map.m_len = last_lblk - map.m_lblk;
  1452. err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
  1453. if (err || map.m_len == 0)
  1454. return false;
  1455. map.m_lblk += map.m_len;
  1456. }
  1457. return true;
  1458. }
  1459. static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
  1460. {
  1461. return (bytes >> inode->i_blkbits);
  1462. }
  1463. static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
  1464. {
  1465. return (blks << inode->i_blkbits);
  1466. }
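/*
 * Worked example (assuming the usual 4KB f2fs block size, i.e.
 * inode->i_blkbits == 12): bytes_to_blks(inode, 8192) == 2 and
 * blks_to_bytes(inode, 3) == 12288.
 */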
  1467. static int __get_data_block(struct inode *inode, sector_t iblock,
  1468. struct buffer_head *bh, int create, int flag,
  1469. pgoff_t *next_pgofs, int seg_type, bool may_write)
  1470. {
  1471. struct f2fs_map_blocks map;
  1472. int err;
  1473. map.m_lblk = iblock;
  1474. map.m_len = bytes_to_blks(inode, bh->b_size);
  1475. map.m_next_pgofs = next_pgofs;
  1476. map.m_next_extent = NULL;
  1477. map.m_seg_type = seg_type;
  1478. map.m_may_create = may_write;
  1479. err = f2fs_map_blocks(inode, &map, create, flag);
  1480. if (!err) {
  1481. map_bh(bh, inode->i_sb, map.m_pblk);
  1482. bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
  1483. bh->b_size = blks_to_bytes(inode, map.m_len);
  1484. }
  1485. return err;
  1486. }
  1487. static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
  1488. struct buffer_head *bh_result, int create)
  1489. {
  1490. return __get_data_block(inode, iblock, bh_result, create,
  1491. F2FS_GET_BLOCK_DIO, NULL,
  1492. f2fs_rw_hint_to_seg_type(inode->i_write_hint),
  1493. true);
  1494. }
  1495. static int get_data_block_dio(struct inode *inode, sector_t iblock,
  1496. struct buffer_head *bh_result, int create)
  1497. {
  1498. return __get_data_block(inode, iblock, bh_result, create,
  1499. F2FS_GET_BLOCK_DIO, NULL,
  1500. f2fs_rw_hint_to_seg_type(inode->i_write_hint),
  1501. false);
  1502. }
  1503. static int f2fs_xattr_fiemap(struct inode *inode,
  1504. struct fiemap_extent_info *fieinfo)
  1505. {
  1506. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  1507. struct page *page;
  1508. struct node_info ni;
  1509. __u64 phys = 0, len;
  1510. __u32 flags;
  1511. nid_t xnid = F2FS_I(inode)->i_xattr_nid;
  1512. int err = 0;
  1513. if (f2fs_has_inline_xattr(inode)) {
  1514. int offset;
  1515. page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
  1516. inode->i_ino, false);
  1517. if (!page)
  1518. return -ENOMEM;
  1519. err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
  1520. if (err) {
  1521. f2fs_put_page(page, 1);
  1522. return err;
  1523. }
  1524. phys = blks_to_bytes(inode, ni.blk_addr);
  1525. offset = offsetof(struct f2fs_inode, i_addr) +
  1526. sizeof(__le32) * (DEF_ADDRS_PER_INODE -
  1527. get_inline_xattr_addrs(inode));
  1528. phys += offset;
  1529. len = inline_xattr_size(inode);
  1530. f2fs_put_page(page, 1);
  1531. flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
  1532. if (!xnid)
  1533. flags |= FIEMAP_EXTENT_LAST;
  1534. err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
  1535. trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
  1536. if (err || err == 1)
  1537. return err;
  1538. }
  1539. if (xnid) {
  1540. page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
  1541. if (!page)
  1542. return -ENOMEM;
  1543. err = f2fs_get_node_info(sbi, xnid, &ni, false);
  1544. if (err) {
  1545. f2fs_put_page(page, 1);
  1546. return err;
  1547. }
  1548. phys = blks_to_bytes(inode, ni.blk_addr);
  1549. len = inode->i_sb->s_blocksize;
  1550. f2fs_put_page(page, 1);
  1551. flags = FIEMAP_EXTENT_LAST;
  1552. }
  1553. if (phys) {
  1554. err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
  1555. trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
  1556. }
  1557. return (err < 0 ? err : 0);
  1558. }
  1559. static loff_t max_inode_blocks(struct inode *inode)
  1560. {
  1561. loff_t result = ADDRS_PER_INODE(inode);
  1562. loff_t leaf_count = ADDRS_PER_BLOCK(inode);
  1563. /* two direct node blocks */
  1564. result += (leaf_count * 2);
  1565. /* two indirect node blocks */
  1566. leaf_count *= NIDS_PER_BLOCK;
  1567. result += (leaf_count * 2);
  1568. /* one double indirect node block */
  1569. leaf_count *= NIDS_PER_BLOCK;
  1570. result += leaf_count;
  1571. return result;
  1572. }
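/*
 * Worked example: max_inode_blocks() sums the data blocks reachable from one
 * inode:
 *
 *   ADDRS_PER_INODE(inode)                         direct pointers in the inode
 * + 2 * ADDRS_PER_BLOCK(inode)                     two direct node blocks
 * + 2 * ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK    two indirect node blocks
 * + ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK^2      one double-indirect node block
 *
 * With the typical 4KB-block constants (on the order of a thousand addresses
 * per node block), the double-indirect term dominates and the total is roughly
 * 10^9 blocks, i.e. a few TB per file.
 */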
  1573. int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
  1574. u64 start, u64 len)
  1575. {
  1576. struct f2fs_map_blocks map;
  1577. sector_t start_blk, last_blk;
  1578. pgoff_t next_pgofs;
  1579. u64 logical = 0, phys = 0, size = 0;
  1580. u32 flags = 0;
  1581. int ret = 0;
  1582. bool compr_cluster = false, compr_appended;
  1583. unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
  1584. unsigned int count_in_cluster = 0;
  1585. loff_t maxbytes;
  1586. if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
  1587. ret = f2fs_precache_extents(inode);
  1588. if (ret)
  1589. return ret;
  1590. }
  1591. ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
  1592. if (ret)
  1593. return ret;
  1594. inode_lock(inode);
  1595. maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
  1596. if (start > maxbytes) {
  1597. ret = -EFBIG;
  1598. goto out;
  1599. }
  1600. if (len > maxbytes || (maxbytes - len) < start)
  1601. len = maxbytes - start;
  1602. if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
  1603. ret = f2fs_xattr_fiemap(inode, fieinfo);
  1604. goto out;
  1605. }
  1606. if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
  1607. ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
  1608. if (ret != -EAGAIN)
  1609. goto out;
  1610. }
  1611. if (bytes_to_blks(inode, len) == 0)
  1612. len = blks_to_bytes(inode, 1);
  1613. start_blk = bytes_to_blks(inode, start);
  1614. last_blk = bytes_to_blks(inode, start + len - 1);
  1615. next:
  1616. memset(&map, 0, sizeof(map));
  1617. map.m_lblk = start_blk;
  1618. map.m_len = bytes_to_blks(inode, len);
  1619. map.m_next_pgofs = &next_pgofs;
  1620. map.m_seg_type = NO_CHECK_TYPE;
  1621. if (compr_cluster) {
  1622. map.m_lblk += 1;
  1623. map.m_len = cluster_size - count_in_cluster;
  1624. }
  1625. ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
  1626. if (ret)
  1627. goto out;
  1628. /* HOLE */
  1629. if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
  1630. start_blk = next_pgofs;
  1631. if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
  1632. max_inode_blocks(inode)))
  1633. goto prep_next;
  1634. flags |= FIEMAP_EXTENT_LAST;
  1635. }
  1636. compr_appended = false;
1637. /* In the case of a compressed cluster, append this to the last extent */
  1638. if (compr_cluster && ((map.m_flags & F2FS_MAP_UNWRITTEN) ||
  1639. !(map.m_flags & F2FS_MAP_FLAGS))) {
  1640. compr_appended = true;
  1641. goto skip_fill;
  1642. }
  1643. if (size) {
  1644. flags |= FIEMAP_EXTENT_MERGED;
  1645. if (IS_ENCRYPTED(inode))
  1646. flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
  1647. ret = fiemap_fill_next_extent(fieinfo, logical,
  1648. phys, size, flags);
  1649. trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
  1650. if (ret)
  1651. goto out;
  1652. size = 0;
  1653. }
  1654. if (start_blk > last_blk)
  1655. goto out;
  1656. skip_fill:
  1657. if (map.m_pblk == COMPRESS_ADDR) {
  1658. compr_cluster = true;
  1659. count_in_cluster = 1;
  1660. } else if (compr_appended) {
  1661. unsigned int appended_blks = cluster_size -
  1662. count_in_cluster + 1;
  1663. size += blks_to_bytes(inode, appended_blks);
  1664. start_blk += appended_blks;
  1665. compr_cluster = false;
  1666. } else {
  1667. logical = blks_to_bytes(inode, start_blk);
  1668. phys = __is_valid_data_blkaddr(map.m_pblk) ?
  1669. blks_to_bytes(inode, map.m_pblk) : 0;
  1670. size = blks_to_bytes(inode, map.m_len);
  1671. flags = 0;
  1672. if (compr_cluster) {
  1673. flags = FIEMAP_EXTENT_ENCODED;
  1674. count_in_cluster += map.m_len;
  1675. if (count_in_cluster == cluster_size) {
  1676. compr_cluster = false;
  1677. size += blks_to_bytes(inode, 1);
  1678. }
  1679. } else if (map.m_flags & F2FS_MAP_UNWRITTEN) {
  1680. flags = FIEMAP_EXTENT_UNWRITTEN;
  1681. }
  1682. start_blk += bytes_to_blks(inode, size);
  1683. }
  1684. prep_next:
  1685. cond_resched();
  1686. if (fatal_signal_pending(current))
  1687. ret = -EINTR;
  1688. else
  1689. goto next;
  1690. out:
  1691. if (ret == 1)
  1692. ret = 0;
  1693. inode_unlock(inode);
  1694. return ret;
  1695. }
  1696. static inline loff_t f2fs_readpage_limit(struct inode *inode)
  1697. {
  1698. if (IS_ENABLED(CONFIG_FS_VERITY) &&
  1699. (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
  1700. return inode->i_sb->s_maxbytes;
  1701. return i_size_read(inode);
  1702. }
  1703. static int f2fs_read_single_page(struct inode *inode, struct page *page,
  1704. unsigned nr_pages,
  1705. struct f2fs_map_blocks *map,
  1706. struct bio **bio_ret,
  1707. sector_t *last_block_in_bio,
  1708. bool is_readahead)
  1709. {
  1710. struct bio *bio = *bio_ret;
  1711. const unsigned blocksize = blks_to_bytes(inode, 1);
  1712. sector_t block_in_file;
  1713. sector_t last_block;
  1714. sector_t last_block_in_file;
  1715. sector_t block_nr;
  1716. int ret = 0;
  1717. block_in_file = (sector_t)page_index(page);
  1718. last_block = block_in_file + nr_pages;
  1719. last_block_in_file = bytes_to_blks(inode,
  1720. f2fs_readpage_limit(inode) + blocksize - 1);
  1721. if (last_block > last_block_in_file)
  1722. last_block = last_block_in_file;
  1723. /* just zeroing out page which is beyond EOF */
  1724. if (block_in_file >= last_block)
  1725. goto zero_out;
  1726. /*
  1727. * Map blocks using the previous result first.
  1728. */
  1729. if ((map->m_flags & F2FS_MAP_MAPPED) &&
  1730. block_in_file > map->m_lblk &&
  1731. block_in_file < (map->m_lblk + map->m_len))
  1732. goto got_it;
  1733. /*
  1734. * Then do more f2fs_map_blocks() calls until we are
  1735. * done with this page.
  1736. */
  1737. map->m_lblk = block_in_file;
  1738. map->m_len = last_block - block_in_file;
  1739. ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
  1740. if (ret)
  1741. goto out;
  1742. got_it:
  1743. if ((map->m_flags & F2FS_MAP_MAPPED)) {
  1744. block_nr = map->m_pblk + block_in_file - map->m_lblk;
  1745. SetPageMappedToDisk(page);
  1746. if (!PageUptodate(page) && (!PageSwapCache(page) &&
  1747. !cleancache_get_page(page))) {
  1748. SetPageUptodate(page);
  1749. goto confused;
  1750. }
  1751. if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
  1752. DATA_GENERIC_ENHANCE_READ)) {
  1753. ret = -EFSCORRUPTED;
  1754. goto out;
  1755. }
  1756. } else {
  1757. zero_out:
  1758. zero_user_segment(page, 0, PAGE_SIZE);
  1759. if (f2fs_need_verity(inode, page->index) &&
  1760. !fsverity_verify_page(page)) {
  1761. ret = -EIO;
  1762. goto out;
  1763. }
  1764. if (!PageUptodate(page))
  1765. SetPageUptodate(page);
  1766. unlock_page(page);
  1767. goto out;
  1768. }
  1769. /*
  1770. * This page will go to BIO. Do we need to send this
  1771. * BIO off first?
  1772. */
  1773. if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
  1774. *last_block_in_bio, block_nr) ||
  1775. !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
  1776. submit_and_realloc:
  1777. __submit_bio(F2FS_I_SB(inode), bio, DATA);
  1778. bio = NULL;
  1779. }
  1780. if (bio == NULL) {
  1781. bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
  1782. is_readahead ? REQ_RAHEAD : 0, page->index,
  1783. false);
  1784. if (IS_ERR(bio)) {
  1785. ret = PTR_ERR(bio);
  1786. bio = NULL;
  1787. goto out;
  1788. }
  1789. }
  1790. /*
  1791. * If the page is under writeback, we need to wait for
  1792. * its completion to see the correct decrypted data.
  1793. */
  1794. f2fs_wait_on_block_writeback(inode, block_nr);
  1795. if (bio_add_page(bio, page, blocksize, 0) < blocksize)
  1796. goto submit_and_realloc;
  1797. inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
  1798. f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
  1799. ClearPageError(page);
  1800. *last_block_in_bio = block_nr;
  1801. goto out;
  1802. confused:
  1803. if (bio) {
  1804. __submit_bio(F2FS_I_SB(inode), bio, DATA);
  1805. bio = NULL;
  1806. }
  1807. unlock_page(page);
  1808. out:
  1809. *bio_ret = bio;
  1810. return ret;
  1811. }
  1812. #ifdef CONFIG_F2FS_FS_COMPRESSION
  1813. int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
  1814. unsigned nr_pages, sector_t *last_block_in_bio,
  1815. bool is_readahead, bool for_write)
  1816. {
  1817. struct dnode_of_data dn;
  1818. struct inode *inode = cc->inode;
  1819. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  1820. struct bio *bio = *bio_ret;
  1821. unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
  1822. sector_t last_block_in_file;
  1823. const unsigned blocksize = blks_to_bytes(inode, 1);
  1824. struct decompress_io_ctx *dic = NULL;
  1825. int i;
  1826. int ret = 0;
  1827. f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
  1828. last_block_in_file = bytes_to_blks(inode,
  1829. f2fs_readpage_limit(inode) + blocksize - 1);
  1830. /* get rid of pages beyond EOF */
  1831. for (i = 0; i < cc->cluster_size; i++) {
  1832. struct page *page = cc->rpages[i];
  1833. if (!page)
  1834. continue;
  1835. if ((sector_t)page->index >= last_block_in_file) {
  1836. zero_user_segment(page, 0, PAGE_SIZE);
  1837. if (!PageUptodate(page))
  1838. SetPageUptodate(page);
  1839. } else if (!PageUptodate(page)) {
  1840. continue;
  1841. }
  1842. unlock_page(page);
  1843. if (for_write)
  1844. put_page(page);
  1845. cc->rpages[i] = NULL;
  1846. cc->nr_rpages--;
  1847. }
  1848. /* we are done since all pages are beyond EOF */
  1849. if (f2fs_cluster_is_empty(cc))
  1850. goto out;
  1851. set_new_dnode(&dn, inode, NULL, NULL, 0);
  1852. ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
  1853. if (ret)
  1854. goto out;
  1855. f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
  1856. for (i = 1; i < cc->cluster_size; i++) {
  1857. block_t blkaddr;
  1858. blkaddr = data_blkaddr(dn.inode, dn.node_page,
  1859. dn.ofs_in_node + i);
  1860. if (!__is_valid_data_blkaddr(blkaddr))
  1861. break;
  1862. if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
  1863. ret = -EFAULT;
  1864. goto out_put_dnode;
  1865. }
  1866. cc->nr_cpages++;
  1867. }
  1868. /* nothing to decompress */
  1869. if (cc->nr_cpages == 0) {
  1870. ret = 0;
  1871. goto out_put_dnode;
  1872. }
  1873. dic = f2fs_alloc_dic(cc);
  1874. if (IS_ERR(dic)) {
  1875. ret = PTR_ERR(dic);
  1876. goto out_put_dnode;
  1877. }
  1878. for (i = 0; i < cc->nr_cpages; i++) {
  1879. struct page *page = dic->cpages[i];
  1880. block_t blkaddr;
  1881. struct bio_post_read_ctx *ctx;
  1882. blkaddr = data_blkaddr(dn.inode, dn.node_page,
  1883. dn.ofs_in_node + i + 1);
  1884. f2fs_wait_on_block_writeback(inode, blkaddr);
  1885. if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
  1886. if (atomic_dec_and_test(&dic->remaining_pages))
  1887. f2fs_decompress_cluster(dic);
  1888. continue;
  1889. }
  1890. if (bio && (!page_is_mergeable(sbi, bio,
  1891. *last_block_in_bio, blkaddr) ||
  1892. !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
  1893. submit_and_realloc:
  1894. __submit_bio(sbi, bio, DATA);
  1895. bio = NULL;
  1896. }
  1897. if (!bio) {
  1898. bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
  1899. is_readahead ? REQ_RAHEAD : 0,
  1900. page->index, for_write);
  1901. if (IS_ERR(bio)) {
  1902. ret = PTR_ERR(bio);
  1903. f2fs_decompress_end_io(dic, ret);
  1904. f2fs_put_dnode(&dn);
  1905. *bio_ret = NULL;
  1906. return ret;
  1907. }
  1908. }
  1909. if (bio_add_page(bio, page, blocksize, 0) < blocksize)
  1910. goto submit_and_realloc;
  1911. ctx = bio->bi_private;
  1912. ctx->enabled_steps |= STEP_DECOMPRESS;
  1913. refcount_inc(&dic->refcnt);
  1914. inc_page_count(sbi, F2FS_RD_DATA);
  1915. f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
  1916. f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
  1917. ClearPageError(page);
  1918. *last_block_in_bio = blkaddr;
  1919. }
  1920. f2fs_put_dnode(&dn);
  1921. *bio_ret = bio;
  1922. return 0;
  1923. out_put_dnode:
  1924. f2fs_put_dnode(&dn);
  1925. out:
  1926. for (i = 0; i < cc->cluster_size; i++) {
  1927. if (cc->rpages[i]) {
  1928. ClearPageUptodate(cc->rpages[i]);
  1929. ClearPageError(cc->rpages[i]);
  1930. unlock_page(cc->rpages[i]);
  1931. }
  1932. }
  1933. *bio_ret = bio;
  1934. return ret;
  1935. }
  1936. #endif
  1937. /*
  1938. * This function was originally taken from fs/mpage.c, and customized for f2fs.
  1939. * Major change was from block_size == page_size in f2fs by default.
  1940. */
  1941. static int f2fs_mpage_readpages(struct inode *inode,
  1942. struct readahead_control *rac, struct page *page)
  1943. {
  1944. struct bio *bio = NULL;
  1945. sector_t last_block_in_bio = 0;
  1946. struct f2fs_map_blocks map;
  1947. #ifdef CONFIG_F2FS_FS_COMPRESSION
  1948. struct compress_ctx cc = {
  1949. .inode = inode,
  1950. .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
  1951. .cluster_size = F2FS_I(inode)->i_cluster_size,
  1952. .cluster_idx = NULL_CLUSTER,
  1953. .rpages = NULL,
  1954. .cpages = NULL,
  1955. .nr_rpages = 0,
  1956. .nr_cpages = 0,
  1957. };
  1958. #endif
  1959. unsigned nr_pages = rac ? readahead_count(rac) : 1;
  1960. unsigned max_nr_pages = nr_pages;
  1961. int ret = 0;
  1962. map.m_pblk = 0;
  1963. map.m_lblk = 0;
  1964. map.m_len = 0;
  1965. map.m_flags = 0;
  1966. map.m_next_pgofs = NULL;
  1967. map.m_next_extent = NULL;
  1968. map.m_seg_type = NO_CHECK_TYPE;
  1969. map.m_may_create = false;
  1970. for (; nr_pages; nr_pages--) {
  1971. if (rac) {
  1972. page = readahead_page(rac);
  1973. prefetchw(&page->flags);
  1974. }
  1975. #ifdef CONFIG_F2FS_FS_COMPRESSION
  1976. if (f2fs_compressed_file(inode)) {
1977. /* there are remaining compressed pages, submit them */
  1978. if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
  1979. ret = f2fs_read_multi_pages(&cc, &bio,
  1980. max_nr_pages,
  1981. &last_block_in_bio,
  1982. rac != NULL, false);
  1983. f2fs_destroy_compress_ctx(&cc, false);
  1984. if (ret)
  1985. goto set_error_page;
  1986. }
  1987. ret = f2fs_is_compressed_cluster(inode, page->index);
  1988. if (ret < 0)
  1989. goto set_error_page;
  1990. else if (!ret)
  1991. goto read_single_page;
  1992. ret = f2fs_init_compress_ctx(&cc);
  1993. if (ret)
  1994. goto set_error_page;
  1995. f2fs_compress_ctx_add_page(&cc, page);
  1996. goto next_page;
  1997. }
  1998. read_single_page:
  1999. #endif
  2000. ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
  2001. &bio, &last_block_in_bio, rac);
  2002. if (ret) {
  2003. #ifdef CONFIG_F2FS_FS_COMPRESSION
  2004. set_error_page:
  2005. #endif
  2006. SetPageError(page);
  2007. zero_user_segment(page, 0, PAGE_SIZE);
  2008. unlock_page(page);
  2009. }
  2010. #ifdef CONFIG_F2FS_FS_COMPRESSION
  2011. next_page:
  2012. #endif
  2013. if (rac)
  2014. put_page(page);
  2015. #ifdef CONFIG_F2FS_FS_COMPRESSION
  2016. if (f2fs_compressed_file(inode)) {
  2017. /* last page */
  2018. if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
  2019. ret = f2fs_read_multi_pages(&cc, &bio,
  2020. max_nr_pages,
  2021. &last_block_in_bio,
  2022. rac != NULL, false);
  2023. f2fs_destroy_compress_ctx(&cc, false);
  2024. }
  2025. }
  2026. #endif
  2027. }
  2028. if (bio)
  2029. __submit_bio(F2FS_I_SB(inode), bio, DATA);
  2030. return ret;
  2031. }
  2032. static int f2fs_read_data_page(struct file *file, struct page *page)
  2033. {
  2034. struct inode *inode = page_file_mapping(page)->host;
  2035. int ret = -EAGAIN;
  2036. trace_f2fs_readpage(page, DATA);
  2037. if (!f2fs_is_compress_backend_ready(inode)) {
  2038. unlock_page(page);
  2039. return -EOPNOTSUPP;
  2040. }
  2041. /* If the file has inline data, try to read it directly */
  2042. if (f2fs_has_inline_data(inode))
  2043. ret = f2fs_read_inline_data(inode, page);
  2044. if (ret == -EAGAIN)
  2045. ret = f2fs_mpage_readpages(inode, NULL, page);
  2046. return ret;
  2047. }
  2048. static void f2fs_readahead(struct readahead_control *rac)
  2049. {
  2050. struct inode *inode = rac->mapping->host;
  2051. trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
  2052. if (!f2fs_is_compress_backend_ready(inode))
  2053. return;
  2054. /* If the file has inline data, skip readpages */
  2055. if (f2fs_has_inline_data(inode))
  2056. return;
  2057. f2fs_mpage_readpages(inode, rac, NULL);
  2058. }
  2059. int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
  2060. {
  2061. struct inode *inode = fio->page->mapping->host;
  2062. struct page *mpage, *page;
  2063. gfp_t gfp_flags = GFP_NOFS;
  2064. if (!f2fs_encrypted_file(inode))
  2065. return 0;
  2066. page = fio->compressed_page ? fio->compressed_page : fio->page;
  2067. /* wait for GCed page writeback via META_MAPPING */
  2068. f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
  2069. if (fscrypt_inode_uses_inline_crypto(inode))
  2070. return 0;
  2071. retry_encrypt:
  2072. fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
  2073. PAGE_SIZE, 0, gfp_flags);
  2074. if (IS_ERR(fio->encrypted_page)) {
  2075. /* flush pending IOs and wait for a while in the ENOMEM case */
  2076. if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
  2077. f2fs_flush_merged_writes(fio->sbi);
  2078. congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
  2079. gfp_flags |= __GFP_NOFAIL;
  2080. goto retry_encrypt;
  2081. }
  2082. return PTR_ERR(fio->encrypted_page);
  2083. }
  2084. mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
  2085. if (mpage) {
  2086. if (PageUptodate(mpage))
  2087. memcpy(page_address(mpage),
  2088. page_address(fio->encrypted_page), PAGE_SIZE);
  2089. f2fs_put_page(mpage, 1);
  2090. }
  2091. return 0;
  2092. }
  2093. static inline bool check_inplace_update_policy(struct inode *inode,
  2094. struct f2fs_io_info *fio)
  2095. {
  2096. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  2097. unsigned int policy = SM_I(sbi)->ipu_policy;
  2098. if (policy & (0x1 << F2FS_IPU_FORCE))
  2099. return true;
  2100. if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
  2101. return true;
  2102. if (policy & (0x1 << F2FS_IPU_UTIL) &&
  2103. utilization(sbi) > SM_I(sbi)->min_ipu_util)
  2104. return true;
  2105. if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
  2106. utilization(sbi) > SM_I(sbi)->min_ipu_util)
  2107. return true;
  2108. /*
2109. * IPU for rewriting async pages
  2110. */
  2111. if (policy & (0x1 << F2FS_IPU_ASYNC) &&
  2112. fio && fio->op == REQ_OP_WRITE &&
  2113. !(fio->op_flags & REQ_SYNC) &&
  2114. !IS_ENCRYPTED(inode))
  2115. return true;
  2116. /* this is only set during fdatasync */
  2117. if (policy & (0x1 << F2FS_IPU_FSYNC) &&
  2118. is_inode_flag_set(inode, FI_NEED_IPU))
  2119. return true;
  2120. if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
  2121. !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
  2122. return true;
  2123. return false;
  2124. }
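/*
 * Example (illustrative sketch): ipu_policy is a bitmask indexed by the
 * F2FS_IPU_* values tested above, so several policies can be enabled at once.
 * The snippet only restates how those bits are combined and checked.
 */
#if 0
	unsigned int policy = (0x1 << F2FS_IPU_SSR) | (0x1 << F2FS_IPU_FSYNC);

	if (policy & (0x1 << F2FS_IPU_SSR)) {
		/* allow in-place update when SSR allocation is needed */
	}
	if (policy & (0x1 << F2FS_IPU_FSYNC)) {
		/* allow in-place update for pages flagged during fdatasync */
	}
#endif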
  2125. bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
  2126. {
  2127. /* swap file is migrating in aligned write mode */
  2128. if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
  2129. return false;
  2130. if (f2fs_is_pinned_file(inode))
  2131. return true;
2132. /* if this is a cold file, we should overwrite it to avoid fragmentation */
  2133. if (file_is_cold(inode))
  2134. return true;
  2135. return check_inplace_update_policy(inode, fio);
  2136. }
  2137. bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
  2138. {
  2139. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  2140. /* The below cases were checked when setting it. */
  2141. if (f2fs_is_pinned_file(inode))
  2142. return false;
  2143. if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
  2144. return true;
  2145. if (f2fs_lfs_mode(sbi))
  2146. return true;
  2147. if (S_ISDIR(inode->i_mode))
  2148. return true;
  2149. if (IS_NOQUOTA(inode))
  2150. return true;
  2151. if (f2fs_is_atomic_file(inode))
  2152. return true;
  2153. /* swap file is migrating in aligned write mode */
  2154. if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
  2155. return true;
  2156. if (fio) {
  2157. if (page_private_gcing(fio->page))
  2158. return true;
  2159. if (page_private_dummy(fio->page))
  2160. return true;
  2161. if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
  2162. f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
  2163. return true;
  2164. }
  2165. return false;
  2166. }
  2167. static inline bool need_inplace_update(struct f2fs_io_info *fio)
  2168. {
  2169. struct inode *inode = fio->page->mapping->host;
  2170. if (f2fs_should_update_outplace(inode, fio))
  2171. return false;
  2172. return f2fs_should_update_inplace(inode, fio);
  2173. }
  2174. int f2fs_do_write_data_page(struct f2fs_io_info *fio)
  2175. {
  2176. struct page *page = fio->page;
  2177. struct inode *inode = page->mapping->host;
  2178. struct dnode_of_data dn;
  2179. struct extent_info ei = {0,0,0};
  2180. struct node_info ni;
  2181. bool ipu_force = false;
  2182. int err = 0;
  2183. set_new_dnode(&dn, inode, NULL, NULL, 0);
  2184. if (need_inplace_update(fio) &&
  2185. f2fs_lookup_extent_cache(inode, page->index, &ei)) {
  2186. fio->old_blkaddr = ei.blk + page->index - ei.fofs;
  2187. if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
  2188. DATA_GENERIC_ENHANCE))
  2189. return -EFSCORRUPTED;
  2190. ipu_force = true;
  2191. fio->need_lock = LOCK_DONE;
  2192. goto got_it;
  2193. }
2194. /* Avoid deadlock between page->lock and f2fs_lock_op */
  2195. if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
  2196. return -EAGAIN;
  2197. err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
  2198. if (err)
  2199. goto out;
  2200. fio->old_blkaddr = dn.data_blkaddr;
  2201. /* This page is already truncated */
  2202. if (fio->old_blkaddr == NULL_ADDR) {
  2203. ClearPageUptodate(page);
  2204. clear_page_private_gcing(page);
  2205. goto out_writepage;
  2206. }
  2207. got_it:
  2208. if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
  2209. !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
  2210. DATA_GENERIC_ENHANCE)) {
  2211. err = -EFSCORRUPTED;
  2212. goto out_writepage;
  2213. }
  2214. /*
2215. * If the current allocation needs SSR,
2216. * it is better to do in-place writes for the updated data.
  2217. */
  2218. if (ipu_force ||
  2219. (__is_valid_data_blkaddr(fio->old_blkaddr) &&
  2220. need_inplace_update(fio))) {
  2221. err = f2fs_encrypt_one_page(fio);
  2222. if (err)
  2223. goto out_writepage;
  2224. set_page_writeback(page);
  2225. ClearPageError(page);
  2226. f2fs_put_dnode(&dn);
  2227. if (fio->need_lock == LOCK_REQ)
  2228. f2fs_unlock_op(fio->sbi);
  2229. err = f2fs_inplace_write_data(fio);
  2230. if (err) {
  2231. if (fscrypt_inode_uses_fs_layer_crypto(inode))
  2232. fscrypt_finalize_bounce_page(&fio->encrypted_page);
  2233. if (PageWriteback(page))
  2234. end_page_writeback(page);
  2235. } else {
  2236. set_inode_flag(inode, FI_UPDATE_WRITE);
  2237. }
  2238. trace_f2fs_do_write_data_page(fio->page, IPU);
  2239. return err;
  2240. }
  2241. if (fio->need_lock == LOCK_RETRY) {
  2242. if (!f2fs_trylock_op(fio->sbi)) {
  2243. err = -EAGAIN;
  2244. goto out_writepage;
  2245. }
  2246. fio->need_lock = LOCK_REQ;
  2247. }
  2248. err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
  2249. if (err)
  2250. goto out_writepage;
  2251. fio->version = ni.version;
  2252. err = f2fs_encrypt_one_page(fio);
  2253. if (err)
  2254. goto out_writepage;
  2255. set_page_writeback(page);
  2256. ClearPageError(page);
  2257. if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
  2258. f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
  2259. /* LFS mode write path */
  2260. f2fs_outplace_write_data(&dn, fio);
  2261. trace_f2fs_do_write_data_page(page, OPU);
  2262. set_inode_flag(inode, FI_APPEND_WRITE);
  2263. if (page->index == 0)
  2264. set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
  2265. out_writepage:
  2266. f2fs_put_dnode(&dn);
  2267. out:
  2268. if (fio->need_lock == LOCK_REQ)
  2269. f2fs_unlock_op(fio->sbi);
  2270. return err;
  2271. }
  2272. int f2fs_write_single_data_page(struct page *page, int *submitted,
  2273. struct bio **bio,
  2274. sector_t *last_block,
  2275. struct writeback_control *wbc,
  2276. enum iostat_type io_type,
  2277. int compr_blocks,
  2278. bool allow_balance)
  2279. {
  2280. struct inode *inode = page->mapping->host;
  2281. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  2282. loff_t i_size = i_size_read(inode);
  2283. const pgoff_t end_index = ((unsigned long long)i_size)
  2284. >> PAGE_SHIFT;
  2285. loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
  2286. unsigned offset = 0;
  2287. bool need_balance_fs = false;
  2288. int err = 0;
  2289. struct f2fs_io_info fio = {
  2290. .sbi = sbi,
  2291. .ino = inode->i_ino,
  2292. .type = DATA,
  2293. .op = REQ_OP_WRITE,
  2294. .op_flags = wbc_to_write_flags(wbc),
  2295. .old_blkaddr = NULL_ADDR,
  2296. .page = page,
  2297. .encrypted_page = NULL,
  2298. .submitted = false,
  2299. .compr_blocks = compr_blocks,
  2300. .need_lock = LOCK_RETRY,
  2301. .io_type = io_type,
  2302. .io_wbc = wbc,
  2303. .bio = bio,
  2304. .last_block = last_block,
  2305. };
  2306. trace_f2fs_writepage(page, DATA);
2307. /* we should bypass data pages to let the kworker jobs proceed */
  2308. if (unlikely(f2fs_cp_error(sbi))) {
  2309. mapping_set_error(page->mapping, -EIO);
  2310. /*
2311. * don't drop any dirty dentry pages, to keep the latest
2312. * directory structure.
  2313. */
  2314. if (S_ISDIR(inode->i_mode))
  2315. goto redirty_out;
  2316. goto out;
  2317. }
  2318. if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
  2319. goto redirty_out;
  2320. if (page->index < end_index ||
  2321. f2fs_verity_in_progress(inode) ||
  2322. compr_blocks)
  2323. goto write;
  2324. /*
2325. * If the offset is beyond the end of the file,
  2326. * this page does not have to be written to disk.
  2327. */
  2328. offset = i_size & (PAGE_SIZE - 1);
  2329. if ((page->index >= end_index + 1) || !offset)
  2330. goto out;
  2331. zero_user_segment(page, offset, PAGE_SIZE);
  2332. write:
  2333. if (f2fs_is_drop_cache(inode))
  2334. goto out;
2335. /* we should not write the 0'th page, which holds the journal header */
  2336. if (f2fs_is_volatile_file(inode) && (!page->index ||
  2337. (!wbc->for_reclaim &&
  2338. f2fs_available_free_memory(sbi, BASE_CHECK))))
  2339. goto redirty_out;
  2340. /* Dentry/quota blocks are controlled by checkpoint */
  2341. if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
  2342. /*
  2343. * We need to wait for node_write to avoid block allocation during
2344. * checkpoint. This can only happen for quota writes, which can cause
2345. * the discard race condition below.
  2346. */
  2347. if (IS_NOQUOTA(inode))
  2348. f2fs_down_read(&sbi->node_write);
  2349. fio.need_lock = LOCK_DONE;
  2350. err = f2fs_do_write_data_page(&fio);
  2351. if (IS_NOQUOTA(inode))
  2352. f2fs_up_read(&sbi->node_write);
  2353. goto done;
  2354. }
  2355. if (!wbc->for_reclaim)
  2356. need_balance_fs = true;
  2357. else if (has_not_enough_free_secs(sbi, 0, 0))
  2358. goto redirty_out;
  2359. else
  2360. set_inode_flag(inode, FI_HOT_DATA);
  2361. err = -EAGAIN;
  2362. if (f2fs_has_inline_data(inode)) {
  2363. err = f2fs_write_inline_data(inode, page);
  2364. if (!err)
  2365. goto out;
  2366. }
  2367. if (err == -EAGAIN) {
  2368. err = f2fs_do_write_data_page(&fio);
  2369. if (err == -EAGAIN) {
  2370. fio.need_lock = LOCK_REQ;
  2371. err = f2fs_do_write_data_page(&fio);
  2372. }
  2373. }
  2374. if (err) {
  2375. file_set_keep_isize(inode);
  2376. } else {
  2377. spin_lock(&F2FS_I(inode)->i_size_lock);
  2378. if (F2FS_I(inode)->last_disk_size < psize)
  2379. F2FS_I(inode)->last_disk_size = psize;
  2380. spin_unlock(&F2FS_I(inode)->i_size_lock);
  2381. }
  2382. done:
  2383. if (err && err != -ENOENT)
  2384. goto redirty_out;
  2385. out:
  2386. inode_dec_dirty_pages(inode);
  2387. if (err) {
  2388. ClearPageUptodate(page);
  2389. clear_page_private_gcing(page);
  2390. }
  2391. if (wbc->for_reclaim) {
  2392. f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
  2393. clear_inode_flag(inode, FI_HOT_DATA);
  2394. f2fs_remove_dirty_inode(inode);
  2395. submitted = NULL;
  2396. }
  2397. unlock_page(page);
  2398. if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
  2399. !F2FS_I(inode)->cp_task && allow_balance)
  2400. f2fs_balance_fs(sbi, need_balance_fs);
  2401. if (unlikely(f2fs_cp_error(sbi))) {
  2402. f2fs_submit_merged_write(sbi, DATA);
  2403. f2fs_submit_merged_ipu_write(sbi, bio, NULL);
  2404. submitted = NULL;
  2405. }
  2406. if (submitted)
  2407. *submitted = fio.submitted ? 1 : 0;
  2408. return 0;
  2409. redirty_out:
  2410. redirty_page_for_writepage(wbc, page);
  2411. /*
2412. * pageout() in MM translates EAGAIN, so it calls handle_write_error()
  2413. * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2414. * file_write_and_wait_range() will see the EIO error, which is critical
2415. * to the return value of fsync() and to reporting atomic_write failure to the user.
  2416. */
  2417. if (!err || wbc->for_reclaim)
  2418. return AOP_WRITEPAGE_ACTIVATE;
  2419. unlock_page(page);
  2420. return err;
  2421. }
  2422. static int f2fs_write_data_page(struct page *page,
  2423. struct writeback_control *wbc)
  2424. {
  2425. #ifdef CONFIG_F2FS_FS_COMPRESSION
  2426. struct inode *inode = page->mapping->host;
  2427. if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
  2428. goto out;
  2429. if (f2fs_compressed_file(inode)) {
  2430. if (f2fs_is_compressed_cluster(inode, page->index)) {
  2431. redirty_page_for_writepage(wbc, page);
  2432. return AOP_WRITEPAGE_ACTIVATE;
  2433. }
  2434. }
  2435. out:
  2436. #endif
  2437. return f2fs_write_single_data_page(page, NULL, NULL, NULL,
  2438. wbc, FS_DATA_IO, 0, true);
  2439. }
  2440. /*
2441. * This function was copied from write_cache_pages in mm/page-writeback.c.
2442. * The major change is that the write step for cold data pages is handled
2443. * separately from warm/hot data pages.
  2444. */
  2445. static int f2fs_write_cache_pages(struct address_space *mapping,
  2446. struct writeback_control *wbc,
  2447. enum iostat_type io_type)
  2448. {
  2449. int ret = 0;
  2450. int done = 0, retry = 0;
  2451. struct pagevec pvec;
  2452. struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
  2453. struct bio *bio = NULL;
  2454. sector_t last_block;
  2455. #ifdef CONFIG_F2FS_FS_COMPRESSION
  2456. struct inode *inode = mapping->host;
  2457. struct compress_ctx cc = {
  2458. .inode = inode,
  2459. .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
  2460. .cluster_size = F2FS_I(inode)->i_cluster_size,
  2461. .cluster_idx = NULL_CLUSTER,
  2462. .rpages = NULL,
  2463. .nr_rpages = 0,
  2464. .cpages = NULL,
  2465. .rbuf = NULL,
  2466. .cbuf = NULL,
  2467. .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
  2468. .private = NULL,
  2469. };
  2470. #endif
  2471. int nr_pages;
  2472. pgoff_t index;
  2473. pgoff_t end; /* Inclusive */
  2474. pgoff_t done_index;
  2475. int range_whole = 0;
  2476. xa_mark_t tag;
  2477. int nwritten = 0;
  2478. int submitted = 0;
  2479. int i;
  2480. pagevec_init(&pvec);
  2481. if (get_dirty_pages(mapping->host) <=
  2482. SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
  2483. set_inode_flag(mapping->host, FI_HOT_DATA);
  2484. else
  2485. clear_inode_flag(mapping->host, FI_HOT_DATA);
  2486. if (wbc->range_cyclic) {
  2487. index = mapping->writeback_index; /* prev offset */
  2488. end = -1;
  2489. } else {
  2490. index = wbc->range_start >> PAGE_SHIFT;
  2491. end = wbc->range_end >> PAGE_SHIFT;
  2492. if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
  2493. range_whole = 1;
  2494. }
  2495. if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
  2496. tag = PAGECACHE_TAG_TOWRITE;
  2497. else
  2498. tag = PAGECACHE_TAG_DIRTY;
  2499. retry:
  2500. retry = 0;
  2501. if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
  2502. tag_pages_for_writeback(mapping, index, end);
  2503. done_index = index;
  2504. while (!done && !retry && (index <= end)) {
  2505. nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
  2506. tag);
  2507. if (nr_pages == 0)
  2508. break;
  2509. for (i = 0; i < nr_pages; i++) {
  2510. struct page *page = pvec.pages[i];
  2511. bool need_readd;
  2512. readd:
  2513. need_readd = false;
  2514. #ifdef CONFIG_F2FS_FS_COMPRESSION
  2515. if (f2fs_compressed_file(inode)) {
  2516. ret = f2fs_init_compress_ctx(&cc);
  2517. if (ret) {
  2518. done = 1;
  2519. break;
  2520. }
  2521. if (!f2fs_cluster_can_merge_page(&cc,
  2522. page->index)) {
  2523. ret = f2fs_write_multi_pages(&cc,
  2524. &submitted, wbc, io_type);
  2525. if (!ret)
  2526. need_readd = true;
  2527. goto result;
  2528. }
  2529. if (unlikely(f2fs_cp_error(sbi)))
  2530. goto lock_page;
  2531. if (f2fs_cluster_is_empty(&cc)) {
  2532. void *fsdata = NULL;
  2533. struct page *pagep;
  2534. int ret2;
  2535. ret2 = f2fs_prepare_compress_overwrite(
  2536. inode, &pagep,
  2537. page->index, &fsdata);
  2538. if (ret2 < 0) {
  2539. ret = ret2;
  2540. done = 1;
  2541. break;
  2542. } else if (ret2 &&
  2543. !f2fs_compress_write_end(inode,
  2544. fsdata, page->index,
  2545. 1)) {
  2546. retry = 1;
  2547. break;
  2548. }
  2549. } else {
  2550. goto lock_page;
  2551. }
  2552. }
  2553. #endif
2554. /* give priority to WB_SYNC threads */
  2555. if (atomic_read(&sbi->wb_sync_req[DATA]) &&
  2556. wbc->sync_mode == WB_SYNC_NONE) {
  2557. done = 1;
  2558. break;
  2559. }
  2560. #ifdef CONFIG_F2FS_FS_COMPRESSION
  2561. lock_page:
  2562. #endif
  2563. done_index = page->index;
  2564. retry_write:
  2565. lock_page(page);
  2566. if (unlikely(page->mapping != mapping)) {
  2567. continue_unlock:
  2568. unlock_page(page);
  2569. continue;
  2570. }
  2571. if (!PageDirty(page)) {
  2572. /* someone wrote it for us */
  2573. goto continue_unlock;
  2574. }
  2575. if (PageWriteback(page)) {
  2576. if (wbc->sync_mode != WB_SYNC_NONE)
  2577. f2fs_wait_on_page_writeback(page,
  2578. DATA, true, true);
  2579. else
  2580. goto continue_unlock;
  2581. }
  2582. if (!clear_page_dirty_for_io(page))
  2583. goto continue_unlock;
  2584. #ifdef CONFIG_F2FS_FS_COMPRESSION
  2585. if (f2fs_compressed_file(inode)) {
  2586. get_page(page);
  2587. f2fs_compress_ctx_add_page(&cc, page);
  2588. continue;
  2589. }
  2590. #endif
  2591. ret = f2fs_write_single_data_page(page, &submitted,
  2592. &bio, &last_block, wbc, io_type,
  2593. 0, true);
  2594. if (ret == AOP_WRITEPAGE_ACTIVATE)
  2595. unlock_page(page);
  2596. #ifdef CONFIG_F2FS_FS_COMPRESSION
  2597. result:
  2598. #endif
  2599. nwritten += submitted;
  2600. wbc->nr_to_write -= submitted;
  2601. if (unlikely(ret)) {
  2602. /*
  2603. * keep nr_to_write, since vfs uses this to
  2604. * get # of written pages.
  2605. */
  2606. if (ret == AOP_WRITEPAGE_ACTIVATE) {
  2607. ret = 0;
  2608. goto next;
  2609. } else if (ret == -EAGAIN) {
  2610. ret = 0;
  2611. if (wbc->sync_mode == WB_SYNC_ALL) {
  2612. cond_resched();
  2613. congestion_wait(BLK_RW_ASYNC,
  2614. DEFAULT_IO_TIMEOUT);
  2615. goto retry_write;
  2616. }
  2617. goto next;
  2618. }
  2619. done_index = page->index + 1;
  2620. done = 1;
  2621. break;
  2622. }
  2623. if (wbc->nr_to_write <= 0 &&
  2624. wbc->sync_mode == WB_SYNC_NONE) {
  2625. done = 1;
  2626. break;
  2627. }
  2628. next:
  2629. if (need_readd)
  2630. goto readd;
  2631. }
  2632. pagevec_release(&pvec);
  2633. cond_resched();
  2634. }
  2635. #ifdef CONFIG_F2FS_FS_COMPRESSION
2636. /* flush remaining pages in the compress cluster */
  2637. if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
  2638. ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
  2639. nwritten += submitted;
  2640. wbc->nr_to_write -= submitted;
  2641. if (ret) {
  2642. done = 1;
  2643. retry = 0;
  2644. }
  2645. }
  2646. if (f2fs_compressed_file(inode))
  2647. f2fs_destroy_compress_ctx(&cc, false);
  2648. #endif
  2649. if (retry) {
  2650. index = 0;
  2651. end = -1;
  2652. goto retry;
  2653. }
  2654. if (wbc->range_cyclic && !done)
  2655. done_index = 0;
  2656. if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
  2657. mapping->writeback_index = done_index;
  2658. if (nwritten)
  2659. f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
  2660. NULL, 0, DATA);
  2661. /* submit cached bio of IPU write */
  2662. if (bio)
  2663. f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
  2664. return ret;
  2665. }
  2666. static inline bool __should_serialize_io(struct inode *inode,
  2667. struct writeback_control *wbc)
  2668. {
2669. /* to avoid a deadlock in the data flush path */
  2670. if (F2FS_I(inode)->cp_task)
  2671. return false;
  2672. if (!S_ISREG(inode->i_mode))
  2673. return false;
  2674. if (IS_NOQUOTA(inode))
  2675. return false;
  2676. if (f2fs_need_compress_data(inode))
  2677. return true;
  2678. if (wbc->sync_mode != WB_SYNC_ALL)
  2679. return true;
  2680. if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
  2681. return true;
  2682. return false;
  2683. }
  2684. static int __f2fs_write_data_pages(struct address_space *mapping,
  2685. struct writeback_control *wbc,
  2686. enum iostat_type io_type)
  2687. {
  2688. struct inode *inode = mapping->host;
  2689. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  2690. struct blk_plug plug;
  2691. int ret;
  2692. bool locked = false;
2693. /* deal with chardevs and other special files */
  2694. if (!mapping->a_ops->writepage)
  2695. return 0;
  2696. /* skip writing if there is no dirty page in this inode */
  2697. if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
  2698. return 0;
  2699. /* during POR, we don't need to trigger writepage at all. */
  2700. if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
  2701. goto skip_write;
  2702. if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
  2703. wbc->sync_mode == WB_SYNC_NONE &&
  2704. get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
  2705. f2fs_available_free_memory(sbi, DIRTY_DENTS))
  2706. goto skip_write;
  2707. /* skip writing during file defragment */
  2708. if (is_inode_flag_set(inode, FI_DO_DEFRAG))
  2709. goto skip_write;
  2710. trace_f2fs_writepages(mapping->host, wbc, DATA);
2711. /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
  2712. if (wbc->sync_mode == WB_SYNC_ALL)
  2713. atomic_inc(&sbi->wb_sync_req[DATA]);
  2714. else if (atomic_read(&sbi->wb_sync_req[DATA])) {
  2715. /* to avoid potential deadlock */
  2716. if (current->plug)
  2717. blk_finish_plug(current->plug);
  2718. goto skip_write;
  2719. }
  2720. if (__should_serialize_io(inode, wbc)) {
  2721. mutex_lock(&sbi->writepages);
  2722. locked = true;
  2723. }
  2724. blk_start_plug(&plug);
  2725. ret = f2fs_write_cache_pages(mapping, wbc, io_type);
  2726. blk_finish_plug(&plug);
  2727. if (locked)
  2728. mutex_unlock(&sbi->writepages);
  2729. if (wbc->sync_mode == WB_SYNC_ALL)
  2730. atomic_dec(&sbi->wb_sync_req[DATA]);
  2731. /*
2732. * if some pages were truncated, we cannot rely on mapping->host
2733. * to detect pending bios.
  2734. */
  2735. f2fs_remove_dirty_inode(inode);
  2736. return ret;
  2737. skip_write:
  2738. wbc->pages_skipped += get_dirty_pages(inode);
  2739. trace_f2fs_writepages(mapping->host, wbc, DATA);
  2740. return 0;
  2741. }
  2742. static int f2fs_write_data_pages(struct address_space *mapping,
  2743. struct writeback_control *wbc)
  2744. {
  2745. struct inode *inode = mapping->host;
  2746. return __f2fs_write_data_pages(mapping, wbc,
  2747. F2FS_I(inode)->cp_task == current ?
  2748. FS_CP_DATA_IO : FS_DATA_IO);
  2749. }
  2750. static void f2fs_write_failed(struct address_space *mapping, loff_t to)
  2751. {
  2752. struct inode *inode = mapping->host;
  2753. loff_t i_size = i_size_read(inode);
  2754. if (IS_NOQUOTA(inode))
  2755. return;
  2756. /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
  2757. if (to > i_size && !f2fs_verity_in_progress(inode)) {
  2758. f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
  2759. f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
  2760. truncate_pagecache(inode, i_size);
  2761. f2fs_truncate_blocks(inode, i_size, true);
  2762. f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
  2763. f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
  2764. }
  2765. }
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0, 0, 0};
	int err = 0;
	int flag;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
	    !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
	    !f2fs_verity_in_progress(inode))
		return 0;

	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
		flag = F2FS_GET_BLOCK_DEFAULT;
	else
		flag = F2FS_GET_BLOCK_PRE_AIO;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_do_map_lock(sbi, flag, true);
		locked = true;
	}

restart:
	/* check inline_data */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			f2fs_do_read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_page_private_inline(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_do_map_lock(sbi, flag, false);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	/*
	 * Should avoid quota operations which can make deadlock:
	 * kswapd -> f2fs_evict_inode -> dquot_drop ->
	 *   f2fs_dquot_commit -> f2fs_write_begin ->
	 *   d_obtain_alias -> __d_alloc -> kmem_cache_alloc(GFP_KERNEL)
	 */
	if (trace_android_fs_datawrite_start_enabled() && !IS_NOQUOTA(inode)) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    inode);
		trace_android_fs_datawrite_start(inode, pos, len,
						 current->pid, path,
						 current->comm);
	}
	trace_f2fs_write_begin(inode, pos, len, flags);

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto fail;
	}

	if ((f2fs_is_atomic_file(inode) &&
			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret;

		*fsdata = NULL;

		if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
			goto repeat;

		ret = f2fs_prepare_compress_overwrite(inode, pagep,
							index, fsdata);
		if (ret < 0) {
			err = ret;
			goto fail;
		} else if (ret) {
			return 0;
		}
	}
#endif

repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. We will wait for that below with our IO
	 * control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	/* TODO: cluster can be compressed due to race with .writepage */

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && !IS_NOQUOTA(inode) &&
			has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
	    !f2fs_verity_in_progress(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
				DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			goto fail;
		}
		err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (drop_atomic)
		f2fs_drop_inmem_pages_all(sbi, false);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_android_fs_datawrite_end(inode, pos, len);
	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and we expect copied to be
	 * PAGE_SIZE. Otherwise, we treat it as zero copied and let
	 * generic_perform_write() try to copy the data again via copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* overwrite compressed file */
	if (f2fs_compressed_file(inode) && fsdata) {
		f2fs_compress_write_end(inode, fsdata, page->index, copied);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

		if (pos + copied > i_size_read(inode) &&
				!f2fs_verity_in_progress(inode))
			f2fs_i_size_write(inode, pos + copied);
		return copied;
	}
#endif

	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode) &&
	    !f2fs_verity_in_progress(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}
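
/*
 * check_direct_IO() decides whether a direct I/O request can be serviced
 * as-is: it returns 0 to go ahead with DIO, 1 to fall back (a read past
 * EOF, or a request only aligned to the device's logical block size), and
 * -EINVAL when the request is not aligned even to that.  A rough example,
 * assuming a 4KiB fs block and a 512B-logical-block device: offset 4096
 * with a 4096B iter is fully aligned (0); offset 512 misses the fs-block
 * mask but passes the 512B mask, so it returns 1; offset 100 fails both
 * masks and returns -EINVAL.
 */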
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	unsigned long align = offset | iov_iter_alignment(iter);
	struct block_device *bdev = inode->i_sb->s_bdev;

	if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
		return 1;

	if (align & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			return -EINVAL;
		return 1;
	}
	return 0;
}
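
/*
 * The two helpers below wrap every direct-I/O bio so that f2fs can keep a
 * per-sb count of in-flight DIO pages: f2fs_dio_submit_bio() stashes the
 * original bi_end_io/bi_private in a small f2fs_private_dio, bumps the
 * F2FS_DIO_READ/F2FS_DIO_WRITE counter and submits the bio;
 * f2fs_dio_end_io() undoes the accounting, restores the original fields
 * and forwards the completion.  If the wrapper cannot be allocated, the
 * bio is failed with BLK_STS_IOERR rather than submitted unaccounted.
 */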
static void f2fs_dio_end_io(struct bio *bio)
{
	struct f2fs_private_dio *dio = bio->bi_private;

	dec_page_count(F2FS_I_SB(dio->inode),
			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	bio->bi_private = dio->orig_private;
	bio->bi_end_io = dio->orig_end_io;

	kfree(dio);

	bio_endio(bio);
}

static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
							loff_t file_offset)
{
	struct f2fs_private_dio *dio;
	bool write = (bio_op(bio) == REQ_OP_WRITE);

	dio = f2fs_kzalloc(F2FS_I_SB(inode),
			sizeof(struct f2fs_private_dio), GFP_NOFS);
	if (!dio)
		goto out;

	dio->inode = inode;
	dio->orig_end_io = bio->bi_end_io;
	dio->orig_private = bio->bi_private;
	dio->write = write;

	bio->bi_end_io = f2fs_dio_end_io;
	bio->bi_private = dio;

	inc_page_count(F2FS_I_SB(inode),
			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	submit_bio(bio);
	return;
out:
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}
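
/*
 * f2fs_direct_IO() is the ->direct_IO entry point.  It returns 0, which
 * makes the caller fall back to buffered I/O, when check_direct_IO()
 * asks for a fallback or f2fs_force_buffered_io() says so; otherwise it
 * takes i_gc_rwsem for the I/O direction (plus the READ side for
 * out-of-place updates) and hands the request to __blockdev_direct_IO()
 * with the dio-specific get_block callbacks.  IOCB_NOWAIT requests
 * return -EAGAIN instead of sleeping on those semaphores.
 */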
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;
	enum rw_hint hint = iocb->ki_hint;
	int whint_mode = F2FS_OPTION(sbi).whint_mode;
	bool do_opu;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err < 0 ? err : 0;

	if (f2fs_force_buffered_io(inode, iocb, iter))
		return 0;

	do_opu = allow_outplace_dio(inode, iocb, iter);

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (trace_android_fs_dataread_start_enabled() &&
	    (rw == READ)) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    inode);
		trace_android_fs_dataread_start(inode, offset,
						count, current->pid, path,
						current->comm);
	}
	if (trace_android_fs_datawrite_start_enabled() &&
	    (rw == WRITE)) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    inode);
		trace_android_fs_datawrite_start(inode, offset, count,
						 current->pid, path,
						 current->comm);
	}

	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[rw])) {
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
		if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
			f2fs_up_read(&fi->i_gc_rwsem[rw]);
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
	} else {
		f2fs_down_read(&fi->i_gc_rwsem[rw]);
		if (do_opu)
			f2fs_down_read(&fi->i_gc_rwsem[READ]);
	}

	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
			iter, rw == WRITE ? get_data_block_dio_write :
			get_data_block_dio, NULL, f2fs_dio_submit_bio,
			rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
			DIO_SKIP_HOLES);

	if (do_opu)
		f2fs_up_read(&fi->i_gc_rwsem[READ]);

	f2fs_up_read(&fi->i_gc_rwsem[rw]);

	if (rw == WRITE) {
		if (whint_mode == WHINT_MODE_OFF)
			iocb->ki_hint = hint;
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			if (!do_opu)
				set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err == -EIOCBQUEUED) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
						count - iov_iter_count(iter));
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	} else {
		if (err > 0)
			f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
		else if (err == -EIOCBQUEUED)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
						count - iov_iter_count(iter));
	}

out:
	if (trace_android_fs_dataread_start_enabled() &&
	    (rw == READ))
		trace_android_fs_dataread_end(inode, offset, count);
	if (trace_android_fs_datawrite_start_enabled() &&
	    (rw == WRITE))
		trace_android_fs_datawrite_end(inode, offset, count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}
	}

	clear_page_private_gcing(page);

	if (test_opt(sbi, COMPRESS_CACHE)) {
		if (f2fs_compressed_file(inode))
			f2fs_invalidate_compress_pages(sbi, inode->i_ino);
		if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
			clear_page_private_data(page);
	}

	if (page_private_atomic(page))
		return f2fs_drop_inmem_page(inode, page);

	detach_page_private(page);
	set_page_private(page, 0);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic-written page, keep its private data */
	if (page_private_atomic(page))
		return 0;

	if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
		struct inode *inode = page->mapping->host;

		if (f2fs_compressed_file(inode))
			f2fs_invalidate_compress_pages(sbi, inode->i_ino);
		if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
			clear_page_private_data(page);
	}

	clear_page_private_gcing(page);

	detach_page_private(page);
	set_page_private(page, 0);
	return 1;
}
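
/*
 * ->set_page_dirty for data pages.  Pages of an in-progress atomic write
 * are not added to the normal dirty list; they are registered as in-memory
 * pages and committed as a whole later.  Everything else goes through
 * __set_page_dirty_nobuffers() plus f2fs's own dirty-inode bookkeeping.
 * The return value follows the usual convention: 1 if the page was newly
 * dirtied (or newly registered), 0 if it was dirty already.
 */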
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (PageSwapCache(page))
		return __set_page_dirty_nobuffers(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!page_private_atomic(page)) {
			f2fs_register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has already been registered, so just return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		f2fs_update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct dnode_of_data dn;
	sector_t start_idx, blknr = 0;
	int ret;

	start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret)
		return 0;

	if (dn.data_blkaddr != COMPRESS_ADDR) {
		dn.ofs_in_node += block - start_idx;
		blknr = f2fs_data_blkaddr(&dn);
		if (!__is_valid_data_blkaddr(blknr))
			blknr = 0;
	}

	f2fs_put_dnode(&dn);
	return blknr;
#else
	return 0;
#endif
}
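
/*
 * ->bmap is what the FIBMAP ioctl ends up calling.  A minimal userspace
 * sketch of how this path gets exercised (hypothetical example, not part
 * of f2fs):
 *
 *	int block = 0;			// logical block 0 of the file
 *	ioctl(fd, FIBMAP, &block);
 *	// 'block' now holds the physical block number, or 0 for a hole,
 *	// inline data, or an out-of-range block.
 *
 * For compressed files the lookup goes through f2fs_bmap_compress(),
 * which returns 0 for blocks inside a compressed cluster, since they
 * have no 1:1 physical address.
 */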
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	sector_t blknr = 0;

	if (f2fs_has_inline_data(inode))
		goto out;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(block >= max_file_blocks(inode)))
		goto out;

	if (f2fs_compressed_file(inode)) {
		blknr = f2fs_bmap_compress(inode, block);
	} else {
		struct f2fs_map_blocks map;

		memset(&map, 0, sizeof(map));
		map.m_lblk = block;
		map.m_len = 1;
		map.m_next_pgofs = NULL;
		map.m_seg_type = NO_CHECK_TYPE;

		if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
			blknr = map.m_pblk;
	}
out:
	trace_f2fs_bmap(inode, block, blknr);
	return blknr;
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = page_private_atomic(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic-written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/* one extra reference was held for atomic_write page */
	extra_count = atomic_written ? 1 : 0;
	rc = migrate_page_move_mapping(mapping, newpage,
				page, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;

		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	/* guarantee to start from no stale private field */
	set_page_private(newpage, 0);
	if (PagePrivate(page)) {
		set_page_private(newpage, page_private(page));
		SetPagePrivate(newpage);
		get_page(newpage);

		set_page_private(page, 0);
		ClearPagePrivate(page);
		put_page(page);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

#ifdef CONFIG_SWAP
static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
							unsigned int blkcnt)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blkofs;
	unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
	unsigned int secidx = start_blk / blk_per_sec;
	unsigned int end_sec = secidx + blkcnt / blk_per_sec;
	int ret = 0;

	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

	set_inode_flag(inode, FI_ALIGNED_WRITE);

	for (; secidx < end_sec; secidx++) {
		f2fs_down_write(&sbi->pin_sem);

		f2fs_lock_op(sbi);
		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
		f2fs_unlock_op(sbi);

		set_inode_flag(inode, FI_DO_DEFRAG);

		for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
			struct page *page;
			unsigned int blkidx = secidx * blk_per_sec + blkofs;

			page = f2fs_get_lock_data_page(inode, blkidx, true);
			if (IS_ERR(page)) {
				f2fs_up_write(&sbi->pin_sem);
				ret = PTR_ERR(page);
				goto done;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);
		}

		clear_inode_flag(inode, FI_DO_DEFRAG);

		ret = filemap_fdatawrite(inode->i_mapping);

		f2fs_up_write(&sbi->pin_sem);

		if (ret)
			break;
	}

done:
	clear_inode_flag(inode, FI_DO_DEFRAG);
	clear_inode_flag(inode, FI_ALIGNED_WRITE);

	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	return ret;
}
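
/*
 * check_swap_activate() walks the swapfile's mapping and feeds its mapped
 * runs to add_swap_extent().  Every extent must start and end on a
 * section boundary so that swap I/O can bypass f2fs entirely.  As a rough
 * example, with 2MiB sections (512 x 4KiB blocks) an extent whose
 * physical start, relative to the main area, is 512-block aligned and
 * whose length is a multiple of 512 blocks is accepted as-is; anything
 * else is rewritten in place by f2fs_migrate_blocks(), which redirties
 * the affected blocks into a pinned cold-data section and retries the
 * mapping.
 */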
static int check_swap_activate(struct swap_info_struct *sis,
				struct file *swap_file, sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	sector_t cur_lblock;
	sector_t last_lblock;
	sector_t pblock;
	sector_t lowest_pblock = -1;
	sector_t highest_pblock = 0;
	int nr_extents = 0;
	unsigned long nr_pblocks;
	unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
	unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
	unsigned int not_aligned = 0;
	int ret = 0;

	/*
	 * Map all the blocks into the extent list. This code doesn't try
	 * to be very smart.
	 */
	cur_lblock = 0;
	last_lblock = bytes_to_blks(inode, i_size_read(inode));

	while (cur_lblock < last_lblock && cur_lblock < sis->max) {
		struct f2fs_map_blocks map;
retry:
		cond_resched();

		memset(&map, 0, sizeof(map));
		map.m_lblk = cur_lblock;
		map.m_len = last_lblock - cur_lblock;
		map.m_next_pgofs = NULL;
		map.m_next_extent = NULL;
		map.m_seg_type = NO_CHECK_TYPE;
		map.m_may_create = false;

		ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
		if (ret)
			goto out;

		/* hole */
		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			f2fs_err(sbi, "Swapfile has holes");
			ret = -EINVAL;
			goto out;
		}

		pblock = map.m_pblk;
		nr_pblocks = map.m_len;

		if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
				nr_pblocks & sec_blks_mask) {
			not_aligned++;

			nr_pblocks = roundup(nr_pblocks, blks_per_sec);
			if (cur_lblock + nr_pblocks > sis->max)
				nr_pblocks -= blks_per_sec;

			if (!nr_pblocks) {
				/* this extent is last one */
				nr_pblocks = map.m_len;
				f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
				goto next;
			}

			ret = f2fs_migrate_blocks(inode, cur_lblock,
							nr_pblocks);
			if (ret)
				goto out;
			goto retry;
		}
next:
		if (cur_lblock + nr_pblocks >= sis->max)
			nr_pblocks = sis->max - cur_lblock;

		if (cur_lblock) {	/* exclude the header page */
			if (pblock < lowest_pblock)
				lowest_pblock = pblock;
			if (pblock + nr_pblocks - 1 > highest_pblock)
				highest_pblock = pblock + nr_pblocks - 1;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		cur_lblock += nr_pblocks;
	}
	ret = nr_extents;
	*span = 1 + highest_pblock - lowest_pblock;
	if (cur_lblock == 0)
		cur_lblock = 1;	/* force Empty message */
	sis->max = cur_lblock;
	sis->pages = cur_lblock - 1;
	sis->highest_bit = cur_lblock - 1;
out:
	if (not_aligned)
		f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
			  not_aligned, blks_per_sec * F2FS_BLKSIZE);
	return ret;
}

static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	struct inode *inode = file_inode(file);
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
		f2fs_err(F2FS_I_SB(inode),
			"Swapfile not supported in LFS mode");
		return -EINVAL;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	if (!f2fs_disable_compressed_file(inode))
		return -EINVAL;

	f2fs_precache_extents(inode);

	ret = check_swap_activate(sis, file, span);
	if (ret < 0)
		return ret;

	set_inode_flag(inode, FI_PIN_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

static void f2fs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	clear_inode_flag(inode, FI_PIN_FILE);
}
#else
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	return -EOPNOTSUPP;
}

static void f2fs_swap_deactivate(struct file *file)
{
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readahead	= f2fs_readahead,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
	.swap_activate	= f2fs_swap_activate,
	.swap_deactivate = f2fs_swap_deactivate,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}
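
/*
 * Post-read contexts (decryption, verity and decompression work) are
 * allocated from a dedicated slab cache backed by a mempool, so that a
 * small number of contexts (NUM_PREALLOC_POST_READ_CTXS) is always
 * available and read completion can make forward progress even under
 * memory pressure.  The destroy path tears the pool down before the
 * cache it draws from.
 */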
int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("f2fs_bio_post_read_ctx",
				  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;

	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}

int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (!f2fs_sb_has_encrypt(sbi) &&
		!f2fs_sb_has_verity(sbi) &&
		!f2fs_sb_has_compression(sbi))
		return 0;

	sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!sbi->post_read_wq)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (sbi->post_read_wq)
		destroy_workqueue(sbi->post_read_wq);
}

int __init f2fs_init_bio_entry_cache(void)
{
	bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
						sizeof(struct bio_entry));
	if (!bio_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bio_entry_cache(void)
{
	kmem_cache_destroy(bio_entry_slab);
}