
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)				\
	do {							\
		if (WARN_ON(condition))				\
			set_sbi_flag(sbi, SBI_NEED_FSCK);	\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE	((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))
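
/*
 * Illustrative sketch (not part of the original header) of how these fields
 * drive injection: each candidate call site bumps inject_ops, and once the
 * counter reaches inject_rate a fault is reported for any type enabled in
 * inject_type.  The real helper in this header is time_to_inject(); the
 * name below is hypothetical.
 */
static inline bool example_should_inject_fault(struct f2fs_fault_info *ffi,
						int type)
{
	if (!ffi->inject_rate || !IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}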
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define F2FS_MOUNT_GC_MERGE		0x20000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x40000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
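
/*
 * Illustrative usage (not part of the original header): mount option bits
 * are flipped and tested through the helpers above, e.g. while parsing
 * "discard"/"nodiscard":
 *
 *	set_opt(sbi, DISCARD);
 *	if (test_opt(sbi, DISCARD))
 *		...issue discards...
 *	clear_opt(sbi, DISCARD);
 */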
#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
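
/*
 * Illustrative (not part of the original header): with two u64 version
 * numbers a = 2 and b = 1, ver_after(a, b) is true.  Casting the difference
 * to a signed 64-bit value keeps the comparison correct even if the
 * version counters ever wrap around.
 */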
typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM	16

/*
 * An implementation of an rwsem that is explicitly unfair to readers. This
 * prevents priority inversion when a low-priority reader acquires the read lock
 * while sleeping on the write lock but the write lock is needed by
 * higher-priority clients.
 */
struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
	wait_queue_head_t read_waiters;
};
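
/*
 * Illustrative sketch (not part of the original header) of the reader-unfair
 * idea; the real f2fs_down_read()/f2fs_up_write() wrappers appear later in
 * this header.  A reader only takes the lock when it is immediately
 * available, so a pending writer is never starved by a stream of readers.
 */
static inline void example_down_read(struct f2fs_rwsem *sem)
{
	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
}

static inline void example_up_write(struct f2fs_rwsem *sem)
{
	up_write(&sem->internal_rwsem);
	wake_up_all(&sem->read_waiters);	/* let blocked readers retry */
}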

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/*
					 * Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000
#define F2FS_FEATURE_RO			0x4000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
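
/*
 * Illustrative usage (not part of the original header): later in this
 * header, per-feature predicates are generated from these macros, along
 * the lines of:
 *
 *	static inline bool f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 */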

/*
 * Default values for user and/or group using reserved blocks
 */
#define F2FS_DEF_RESUID		0
#define F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define CP_UMOUNT	0x00000001
#define CP_FASTBOOT	0x00000002
#define CP_SYNC		0x00000004
#define CP_RECOVERY	0x00000008
#define CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY	16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
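
/*
 * Illustrative mapping (not part of the original header): pending discards
 * are binned by length, so plist_idx(1) == 0, plist_idx(16) == 15, and any
 * request of MAX_PLIST_NUM (512) blocks or more lands in the last bin,
 * e.g. plist_idx(2048) == 511.
 */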

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;		/* logical start address */
	block_t len;		/* length */
	block_t start;		/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */
	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity at which discards ignore I/O awareness */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
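
/*
 * Illustrative caller pattern (not part of the original header): before
 * journaling one more NAT entry in the current summary, check for space
 * and fall back to writing the NAT area directly when the journal is full:
 *
 *	if (!__has_cursum_space(journal, 1, NAT_JOURNAL))
 *		...flush the journal to the NAT blocks...
 *	i = update_nats_in_cursum(journal, 1);
 *	nid_in_journal(journal, i) = cpu_to_le32(nid);
 */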

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) *	\
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) -	\
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) *	\
				NR_INLINE_DENTRY(inode) +		\
				INLINE_DENTRY_BITMAP_SIZE(inode)))
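
/*
 * Worked example (illustrative, assuming the default layout with no extra
 * attributes and the default 50-slot inline xattr area): MAX_INLINE_DATA =
 * 4 * (923 - 50 - 1) = 3488 bytes, so NR_INLINE_DENTRY = 3488 * 8 / 153 =
 * 182 entries, INLINE_DENTRY_BITMAP_SIZE = 23 bytes, and
 * INLINE_RESERVED_SIZE = 3488 - (19 * 182 + 23) = 7 bytes.
 */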

/*
 * For INODE and NODE manager
 */
/* for directory operations */
struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the directory is both
	 * casefolded and encrypted and its encryption key is unavailable, or if
	 * the filesystem is doing an internal operation where usr_fname is also
	 * NULL.  In all these cases we fall back to treating the name as an
	 * opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
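
/*
 * Resulting inline dentry layout inside the inline data area (illustrative,
 * not part of the original header):
 *
 *	+--------+----------+-------------+----------------+
 *	| bitmap | reserved | dir entries | filename slots |
 *	+--------+----------+-------------+----------------+
 *
 * make_dentry_ptr_inline() derives each region's offset from
 * INLINE_DENTRY_BITMAP_SIZE(), INLINE_RESERVED_SIZE() and
 * NR_INLINE_DENTRY() above.
 */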

/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */

/* congestion wait timeout value, default: 20ms */
#define DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bit key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

/*
 * i_advise uses FADVISE_XXX_BIT.  We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40
#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)
#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)
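
/*
 * Illustrative usage (not part of the original header): these hints are
 * read and updated through is_file()/set_file()/clear_file(), defined
 * later in this header, e.g. file_set_cold(inode) steers a file's blocks
 * to the cold data log and file_is_cold(inode) is consulted when a log
 * type is picked for a write.
 */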

#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic commit */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct f2fs_rwsem i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct f2fs_rwsem i_gc_rwsem[2];
	struct f2fs_rwsem i_mmap_sem;
	struct f2fs_rwsem i_xattr_sem;	/* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;	/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}
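
/*
 * Illustrative (not part of the original header): back = {lstart 100, len 8}
 * and front = {lstart 108, len 4} are mergeable into one 12-block command as
 * long as 12 <= max_len; a gap between them or an oversized result keeps
 * them separate.
 */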

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}
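
/*
 * Illustrative (not part of the original header): back = {fofs 0, len 4,
 * blk 100} and front = {fofs 4, len 2, blk 104} merge into {fofs 0, len 6,
 * blk 100}; both the file range and the on-disk range must be contiguous.
 */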

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be read ahead */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE];	/* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t	data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
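
/*
 * Illustrative lookup pattern (not part of the original header);
 * f2fs_get_dnode_of_data(), f2fs_data_blkaddr() and f2fs_put_dnode() are
 * declared later in this header:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = f2fs_data_blkaddr(&dn);
 *		f2fs_put_dnode(&dn);
 *	}
 */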

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, the on-disk layout covers a maximum of 16 logs that consist
 * of 8 for data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct f2fs_rwsem curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;		/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */

/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;			/* inode number */
	enum page_type type;		/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;		/* contains HOT/WARM/COLD */
	int op;				/* contains REQ_OP_ */
	int op_flags;			/* req_flag_bits */
	block_t new_blkaddr;		/* new block address to be written */
	block_t old_blkaddr;		/* old block address before CoW */
	struct page *page;		/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;			/* indicate IO submission */
	int need_lock;			/* indicate we need to lock cp_rwsem */
	bool in_list;			/* indicate fio is in io_list */
	bool is_por;			/* indicate IO is from recovery or not */
	bool retry;			/* need to reallocate block address */
	int compr_blocks;		/* # of compressed block addresses */
	bool encrypted;			/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct f2fs_rwsem io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct f2fs_rwsem bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;	/* Array of zone capacity in blks */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,			/* dirty flag for checkpoint */
	SBI_IS_CLOSE,			/* specify unmounting */
	SBI_NEED_FSCK,			/* need fsck.f2fs to fix */
	SBI_POR_DOING,			/* recovery is in progress */
	SBI_NEED_SB_WRITE,		/* need to recover superblock */
	SBI_NEED_CP,			/* need to checkpoint */
	SBI_IS_SHUTDOWN,		/* shutdown by ioctl */
	SBI_IS_RECOVERED,		/* recovered orphan/data */
	SBI_CP_DISABLED,		/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,		/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,		/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,		/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,		/* quota file may be corrupted */
	SBI_IS_RESIZEFS,		/* resizefs is in progress */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
	MAX_GC_MODE,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled;
				 * the user can control file compression
				 * using ioctls
				 */
};

static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);

/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_ATOMIC_WRITE
 * bit 2	PAGE_PRIVATE_DUMMY_WRITE
 * bit 3	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 4	PAGE_PRIVATE_INLINE_INODE
 * bit 5	PAGE_PRIVATE_REF_RESOURCE
 * bit 6-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */
enum {
	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
	PAGE_PRIVATE_ATOMIC_WRITE,		/* data page from atomic write path */
	PAGE_PRIVATE_DUMMY_WRITE,		/* data page for padding aligned IO */
	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
	PAGE_PRIVATE_MAX
};

#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
static inline bool page_private_##name(struct page *page) \
{ \
	return PagePrivate(page) && \
		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
static inline void set_page_private_##name(struct page *page) \
{ \
	if (!PagePrivate(page)) { \
		get_page(page); \
		SetPagePrivate(page); \
		set_page_private(page, 0); \
	} \
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
static inline void clear_page_private_##name(struct page *page) \
{ \
	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
		set_page_private(page, 0); \
		if (PagePrivate(page)) { \
			ClearPagePrivate(page); \
			put_page(page); \
		} \
	} \
}

PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);

static inline unsigned long get_page_private_data(struct page *page)
{
	unsigned long data = page_private(page);

	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
		return 0;
	return data >> PAGE_PRIVATE_MAX;
}

static inline void set_page_private_data(struct page *page, unsigned long data)
{
	if (!PagePrivate(page)) {
		get_page(page);
		SetPagePrivate(page);
		set_page_private(page, 0);
	}
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
	page_private(page) |= data << PAGE_PRIVATE_MAX;
}

static inline void clear_page_private_data(struct page *page)
{
	page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
		set_page_private(page, 0);
		if (PagePrivate(page)) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
}
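
/*
 * Usage sketch (illustrative only, not called from this header): a
 * writeback path could tag a page that pins resources and stash a small
 * integer in the remaining private bits:
 *
 *	set_page_private_reference(page);
 *	set_page_private_data(page, 42);
 *	...
 *	WARN_ON(get_page_private_data(page) != 42);
 *	clear_page_private_data(page);
 *	clear_page_private_reference(page);
 *
 * The helpers take and drop the extra page reference themselves.
 */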

/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define COMPRESS_WATERMARK		20
#define COMPRESS_PERCENT		20

#define COMPRESS_DATA_RESERVED_SIZE	4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define COMPRESS_LEVEL_OFFSET	8

/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages storing raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages storing compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages storing raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};

/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages storing raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages storing compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */

	/*
	 * The number of compressed pages remaining to be read in this cluster.
	 * This is initially nr_cpages. It is decremented by 1 each time a page
	 * has been read (or failed to be read). When it reaches 0, the cluster
	 * is decompressed (or an error is reported).
	 *
	 * If an error occurs before all the pages have been submitted for I/O,
	 * then this will never reach 0. In this case the I/O submitter is
	 * responsible for calling f2fs_decompress_end_io() instead.
	 */
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion. This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
	 * bio. These references are necessary to prevent compressed pages from
	 * being freed while they are still in a bio.
	 */
	refcount_t refcnt;

	bool failed;			/* IO error occurred before decompression? */
	bool need_verity;		/* need fs-verity verification after decompression? */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
	struct work_struct verity_work;	/* work to verify the decompressed pages */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
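
/*
 * Worked example (sketch): with 4KiB pages, the bounds above allow
 * compression windows from MAX_COMPRESS_WINDOW_SIZE(2) = 16KiB up to
 * MAX_COMPRESS_WINDOW_SIZE(8) = 1MiB.
 */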

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct f2fs_rwsem sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct f2fs_rwsem io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remaining current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct f2fs_rwsem cp_global_sem;	/* checkpoint procedure lock */
	struct f2fs_rwsem cp_rwsem;		/* blocking FS operations */
	struct f2fs_rwsem node_write;		/* locking node writes */
	struct f2fs_rwsem node_change;		/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */
	struct ckpt_req_control cprc_info;	/* for checkpoint request control */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */

	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;		/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	int dir_level;				/* directory level */
	int readdir_ra;				/* readahead inode in readdir */
	u64 max_io_bytes;			/* max io bytes to merge IOs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */
	block_t current_reserved_blocks;	/* current reserved blocks */

	/* Additional tracking for no checkpoint mode */
	block_t unusable_block_count;		/* # of blocks saved by last cp */

	unsigned int nquota_files;		/* # of quota sysfiles */
	struct f2fs_rwsem quota_sem;		/* blocking cp for flags */

	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;

	/* writeback control */
	atomic_t wb_sync_req[META];		/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct f2fs_rwsem gc_lock;		/*
						 * semaphore for GC, avoid
						 * race between GC and GC or CP
						 */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	struct atgc_management am;		/* atgc management */
	unsigned int cur_victim_sec;		/* current victim section num */
	unsigned int gc_mode;			/* current GC state */
	unsigned int next_victim_seg[2];	/* next segment in victim section */

	/* for skip statistic */
	unsigned int atomic_files;		/* # of opened atomic files */
	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
	unsigned long long skipped_gc_rwsem;	/* FG_GC only */

	/* threshold for gc trials on pinned files */
	u64 gc_pin_file_threshold;
	struct f2fs_rwsem pin_sem;

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;
	/* migration granularity of garbage collection, unit: segment */
	unsigned int migration_granularity;

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	atomic_t meta_count[META_MAX];		/* # of meta blocks */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;			/* # of inplace update */
	atomic64_t total_hit_ext;		/* # of lookup extent cache */
	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
	atomic64_t read_hit_largest;		/* # of hit largest extent node */
	atomic64_t read_hit_cached;		/* # of hit cached extent node */
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
	atomic_t compr_inode;			/* # of compressed inodes */
	atomic64_t compr_blocks;		/* # of compressed blocks */
	atomic_t vw_cnt;			/* # of volatile writes */
	atomic_t max_aw_cnt;			/* max # of atomic writes */
	atomic_t max_vw_cnt;			/* max # of volatile writes */
	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
#endif
	spinlock_t stat_lock;			/* lock for stat operations */

	/* For app/fs IO statistics */
	spinlock_t iostat_lock;
	unsigned long long rw_iostat[NR_IO_TYPE];
	unsigned long long prev_rw_iostat[NR_IO_TYPE];
	bool iostat_enable;
	unsigned long iostat_next_period;
	unsigned int iostat_period_ms;

	/* to attach REQ_META|REQ_FUA flags */
	unsigned int data_io_flag;
	unsigned int node_io_flag;

	/* For sysfs support */
	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
	struct completion s_kobj_unregister;

	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
	struct completion s_stat_kobj_unregister;

	struct kobject s_feature_list_kobj;	/* /sys/fs/f2fs/<devname>/feature_list */
	struct completion s_feature_list_kobj_unregister;

	/* For shrinker support */
	struct list_head s_list;
	int s_ndevs;				/* number of devices */
	struct f2fs_dev_info *devs;		/* for device list */
	unsigned int dirty_device;		/* for checkpoint data flush */
	spinlock_t dev_lock;			/* protect dirty_device */
	struct mutex umount_mutex;
	unsigned int shrinker_run_no;

	/* For write statistics */
	u64 sectors_written_start;
	u64 kbytes_written;

	/* Reference to checksum algorithm driver via cryptoapi */
	struct crypto_shash *s_chksum_driver;

	/* Precomputed FS UUID checksum for seeding other checksums */
	__u32 s_chksum_seed;

	struct workqueue_struct *post_read_wq;	/* post read workqueue */

	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */

	/* For reclaimed segs statistics per each GC mode */
	unsigned int gc_segment_mode;		/* GC state for reclaimed segments */
	unsigned int gc_reclaimed_segs[MAX_GC_MODE];	/* Reclaimed segs for each mode */

#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct kmem_cache *page_array_slab;	/* page array entry */
	unsigned int page_array_slab_size;	/* default page array slab size */

	/* For runtime compression statistics */
	u64 compr_written_block;
	u64 compr_saved_block;
	u32 compr_new_inode;

	/* For compressed block cache */
	struct inode *compress_inode;		/* cache compressed blocks */
	unsigned int compress_percent;		/* cache page percentage */
	unsigned int compress_watermark;	/* cache page watermark */
	atomic_t compress_page_hit;		/* cache hit count */
#endif
};

struct f2fs_private_dio {
	struct inode *inode;
	void *orig_private;
	bio_end_io_t *orig_end_io;
	bool write;
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(sbi, type)				\
	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
		KERN_INFO, sbi->sb->s_id,				\
		f2fs_fault_name[type],					\
		__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (!ffi->inject_rate)
		return false;

	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#else
#define f2fs_show_injection_info(sbi, type) do { } while (0)
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	return false;
}
#endif
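
/*
 * Typical fault-injection call pattern (sketch, mirroring the users
 * later in this file):
 *
 *	if (time_to_inject(sbi, FAULT_BLOCK)) {
 *		f2fs_show_injection_info(sbi, FAULT_BLOCK);
 *		return -ENOSPC;
 *	}
 */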

/*
 * Test if the mounted volume is a multi-device volume.
 *   - For a single regular disk volume, sbi->s_ndevs is 0.
 *   - For a single zoned disk volume, sbi->s_ndevs is 1.
 *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
 */
static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
{
	return sbi->s_ndevs > 1;
}

static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
{
	unsigned long now = jiffies;

	sbi->last_time[type] = now;

	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
	if (type == REQ_TIME) {
		sbi->last_time[DISCARD_TIME] = now;
		sbi->last_time[GC_TIME] = now;
	}
}

static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;

	return time_after(jiffies, sbi->last_time[type] + interval);
}

static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
						int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;
	unsigned int wait_ms = 0;
	long delta;

	delta = (sbi->last_time[type] + interval) - jiffies;
	if (delta > 0)
		wait_ms = jiffies_to_msecs(delta);

	return wait_ms;
}
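
/*
 * Worked example (sketch): if interval_time[CP_TIME] is 60 seconds and
 * 45 seconds have elapsed since last_time[CP_TIME], f2fs_time_over()
 * returns false and f2fs_time_to_wait() returns roughly 15000 ms.
 */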

/*
 * Inline functions
 */
static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[4];
	} desc;
	int err;

	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));

	desc.shash.tfm = sbi->s_chksum_driver;
	*(u32 *)desc.ctx = crc;

	err = crypto_shash_update(&desc.shash, address, length);
	BUG_ON(err);

	return *(u32 *)desc.ctx;
}

static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
			   unsigned int length)
{
	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
}

static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
				  void *buf, size_t buf_size)
{
	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
}

static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	return __f2fs_crc32(sbi, crc, address, length);
}
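
/*
 * Usage sketch: a buffer carrying an on-disk checksum at "offset" would
 * be verified as
 *
 *	if (!f2fs_crc_valid(sbi, stored_crc, buf, offset))
 *		return -EINVAL;
 *
 * where stored_crc was produced by f2fs_crc32(sbi, buf, offset).
 */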

static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
{
	return F2FS_SB(inode->i_sb);
}

static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
{
	return F2FS_I_SB(mapping->host);
}

static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
	return F2FS_M_SB(page_file_mapping(page));
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_inode *F2FS_INODE(struct page *page)
{
	return &((struct f2fs_node *)page_address(page))->i;
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->meta_inode->i_mapping;
}

static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->node_inode->i_mapping;
}

static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
	return test_bit(type, &sbi->s_flag);
}

static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	set_bit(type, &sbi->s_flag);
}

static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	clear_bit(type, &sbi->s_flag);
}

static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
{
	return le64_to_cpu(cp->checkpoint_ver);
}

static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
{
	if (type < F2FS_MAX_QUOTAS)
		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
	return 0;
}

static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
{
	size_t crc_offset = le32_to_cpu(cp->checksum_offset);

	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
}

static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);

	return ckpt_flags & f;
}

static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
}

static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__set_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}
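
/*
 * Example (sketch): checkpoint-time code flips these flags under
 * cp_lock, e.g. set_ckpt_flags(sbi, CP_NAT_BITS_FLAG), while readers
 * may test them locklessly via is_set_ckpt_flags().
 */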

static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
{
	unsigned long flags;
	unsigned char *nat_bits;

	/*
	 * In order to re-enable nat_bits we need to call fsck.f2fs by
	 * set_sbi_flag(sbi, SBI_NEED_FSCK), but that may be very costly,
	 * so let's rely on regular fsck or unclean shutdown.
	 */
	if (lock)
		spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
	nat_bits = NM_I(sbi)->nat_bits;
	NM_I(sbi)->nat_bits = NULL;
	if (lock)
		spin_unlock_irqrestore(&sbi->cp_lock, flags);

	kvfree(nat_bits);
}

static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
					struct cp_control *cpc)
{
	bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);

	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
}

#define init_f2fs_rwsem(sem)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_f2fs_rwsem((sem), #sem, &__key);			\
} while (0)

static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
		const char *sem_name, struct lock_class_key *key)
{
	__init_rwsem(&sem->internal_rwsem, sem_name, key);
	init_waitqueue_head(&sem->read_waiters);
}

static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
{
	return rwsem_is_locked(&sem->internal_rwsem);
}

static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
{
	return rwsem_is_contended(&sem->internal_rwsem);
}

static inline void f2fs_down_read(struct f2fs_rwsem *sem)
{
	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
}

static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
{
	return down_read_trylock(&sem->internal_rwsem);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
{
	down_read_nested(&sem->internal_rwsem, subclass);
}
#else
#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
#endif

static inline void f2fs_up_read(struct f2fs_rwsem *sem)
{
	up_read(&sem->internal_rwsem);
}

static inline void f2fs_down_write(struct f2fs_rwsem *sem)
{
	down_write(&sem->internal_rwsem);
}

static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
{
	return down_write_trylock(&sem->internal_rwsem);
}

static inline void f2fs_up_write(struct f2fs_rwsem *sem)
{
	up_write(&sem->internal_rwsem);
	wake_up_all(&sem->read_waiters);
}
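
/*
 * Design note: readers that miss the trylock in f2fs_down_read() sleep
 * on read_waiters rather than queueing inside the rwsem, and
 * f2fs_up_write() wakes them all, so a stream of writers cannot starve
 * readers the way the stock rwsem's writer preference can.
 */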

static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	f2fs_down_read(&sbi->cp_rwsem);
}

static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
{
	return f2fs_down_read_trylock(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	f2fs_up_read(&sbi->cp_rwsem);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	f2fs_down_write(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	f2fs_up_write(&sbi->cp_rwsem);
}
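
/*
 * Usage sketch: mutating paths bracket metadata updates with
 *
 *	f2fs_lock_op(sbi);
 *	...allocate blocks, update node/dentry pages...
 *	f2fs_unlock_op(sbi);
 *
 * while checkpoint takes cp_rwsem for write via f2fs_lock_all(), so it
 * only proceeds once no such operation is in flight.
 */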

static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
{
	int reason = CP_SYNC;

	if (test_opt(sbi, FASTBOOT))
		reason = CP_FASTBOOT;
	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
		reason = CP_UMOUNT;
	return reason;
}

static inline bool __remain_node_summaries(int reason)
{
	return (reason & (CP_UMOUNT | CP_FASTBOOT));
}

static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
}

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;

	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
}

static inline bool f2fs_has_xattr_block(unsigned int ofs)
{
	return ofs == XATTR_NODE_OFFSET;
}

static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
					struct inode *inode, bool cap)
{
	if (!inode)
		return true;
	if (!test_opt(sbi, RESERVE_ROOT))
		return false;
	if (IS_NOQUOTA(inode))
		return true;
	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
		return true;
	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
					in_group_p(F2FS_OPTION(sbi).s_resgid))
		return true;
	if (cap && capable(CAP_SYS_RESOURCE))
		return true;
	return false;
}

static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t *count)
{
	blkcnt_t diff = 0, release = 0;
	block_t avail_user_block_count;
	int ret;

	ret = dquot_reserve_block(inode, *count);
	if (ret)
		return ret;

	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		release = *count;
		goto release_quota;
	}

	/*
	 * let's increase this prior to the actual block count change in order
	 * for f2fs_sync_file to avoid data races when deciding checkpoint.
	 */
	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));

	spin_lock(&sbi->stat_lock);
	sbi->total_valid_block_count += (block_t)(*count);
	avail_user_block_count = sbi->user_block_count -
					sbi->current_reserved_blocks;

	if (!__allow_reserved_blocks(sbi, inode, true))
		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;

	if (F2FS_IO_ALIGNED(sbi))
		avail_user_block_count -= sbi->blocks_per_seg *
				SM_I(sbi)->additional_reserved_segments;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (avail_user_block_count > sbi->unusable_block_count)
			avail_user_block_count -= sbi->unusable_block_count;
		else
			avail_user_block_count = 0;
	}
	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
		diff = sbi->total_valid_block_count - avail_user_block_count;
		if (diff > *count)
			diff = *count;
		*count -= diff;
		release = diff;
		sbi->total_valid_block_count -= diff;
		if (!*count) {
			spin_unlock(&sbi->stat_lock);
			goto enospc;
		}
	}
	spin_unlock(&sbi->stat_lock);

	if (unlikely(release)) {
		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
		dquot_release_reservation_block(inode, release);
	}
	f2fs_i_blocks_write(inode, *count, true, true);
	return 0;

enospc:
	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
release_quota:
	dquot_release_reservation_block(inode, release);
	return -ENOSPC;
}
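
/*
 * Caller sketch: reserve "need" blocks, tolerating a trimmed grant when
 * space is tight (the helper shrinks *count instead of failing outright
 * unless nothing at all can be reserved):
 *
 *	blkcnt_t cnt = need;
 *	int err = inc_valid_block_count(sbi, inode, &cnt);
 *
 *	if (err)
 *		return err;
 *	// cnt now holds how many blocks were actually reserved
 */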

__printf(2, 3)
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);

#define f2fs_err(sbi, fmt, ...)						\
	f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
#define f2fs_warn(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
#define f2fs_notice(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
#define f2fs_info(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
#define f2fs_debug(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)

static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						block_t count)
{
	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;

	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
	sbi->total_valid_block_count -= (block_t)count;
	if (sbi->reserved_blocks &&
		sbi->current_reserved_blocks < sbi->reserved_blocks)
		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
					sbi->current_reserved_blocks + count);
	spin_unlock(&sbi->stat_lock);
	if (unlikely(inode->i_blocks < sectors)) {
		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
			  inode->i_ino,
			  (unsigned long long)inode->i_blocks,
			  (unsigned long long)sectors);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return;
	}
	f2fs_i_blocks_write(inode, count, false, true);
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);

	if (count_type == F2FS_DIRTY_DENTS ||
			count_type == F2FS_DIRTY_NODES ||
			count_type == F2FS_DIRTY_META ||
			count_type == F2FS_DIRTY_QDATA ||
			count_type == F2FS_DIRTY_IMETA)
		set_sbi_flag(sbi, SBI_IS_DIRTY);
}

static inline void inode_inc_dirty_pages(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_pages);
	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
	if (IS_NOQUOTA(inode))
		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_pages(struct inode *inode)
{
	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	atomic_dec(&F2FS_I(inode)->dirty_pages);
	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
	if (IS_NOQUOTA(inode))
		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}

static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_dirty_pages(struct inode *inode)
{
	return atomic_read(&F2FS_I(inode)->dirty_pages);
}

static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
						sbi->log_blocks_per_seg;

	return segs / sbi->segs_per_sec;
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_block_count;
}

static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->discard_blks;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
	int offset;

	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
		offset = (flag == SIT_BITMAP) ?
			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
		/*
		 * if large_nat_bitmap feature is enabled, leave checksum
		 * protection for all nat/sit bitmaps.
		 */
		return tmp_ptr + offset + sizeof(__le32);
	}

	if (__cp_payload(sbi) > 0) {
		if (flag == NAT_BITMAP)
			return &ckpt->sit_nat_version_bitmap;
		else
			return (unsigned char *)ckpt + F2FS_BLKSIZE;
	} else {
		offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
		return tmp_ptr + offset;
	}
}

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 2)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 1)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
{
	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
}
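
/*
 * Sketch of the layout implied above: two checkpoint packs alternate at
 * cp_blkaddr and cp_blkaddr + blocks_per_seg; __set_cp_next_pack()
 * flips between them, so a crash while writing one pack leaves the
 * other pack intact.
 */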

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	block_t	valid_block_count;
	unsigned int valid_node_count, user_block_count;
	int err;

	if (is_inode) {
		if (inode) {
			err = dquot_alloc_inode(inode);
			if (err)
				return err;
		}
	} else {
		err = dquot_reserve_block(inode, 1);
		if (err)
			return err;
	}

	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		goto enospc;
	}

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count +
					sbi->current_reserved_blocks + 1;

	if (!__allow_reserved_blocks(sbi, inode, false))
		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;

	if (F2FS_IO_ALIGNED(sbi))
		valid_block_count += sbi->blocks_per_seg *
				SM_I(sbi)->additional_reserved_segments;

	user_block_count = sbi->user_block_count;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		user_block_count -= sbi->unusable_block_count;

	if (unlikely(valid_block_count > user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	valid_node_count = sbi->total_valid_node_count + 1;
	if (unlikely(valid_node_count > sbi->total_node_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	sbi->total_valid_node_count++;
	sbi->total_valid_block_count++;
	spin_unlock(&sbi->stat_lock);

	if (inode) {
		if (is_inode)
			f2fs_mark_inode_dirty_sync(inode, true);
		else
			f2fs_i_blocks_write(inode, 1, true, true);
	}

	percpu_counter_inc(&sbi->alloc_valid_block_count);
	return 0;

enospc:
	if (is_inode) {
		if (inode)
			dquot_free_inode(inode);
	} else {
		dquot_release_reservation_block(inode, 1);
	}
	return -ENOSPC;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	spin_lock(&sbi->stat_lock);

	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
	f2fs_bug_on(sbi, !sbi->total_valid_node_count);

	sbi->total_valid_node_count--;
	sbi->total_valid_block_count--;
	if (sbi->reserved_blocks &&
		sbi->current_reserved_blocks < sbi->reserved_blocks)
		sbi->current_reserved_blocks++;

	spin_unlock(&sbi->stat_lock);

	if (is_inode) {
		dquot_free_inode(inode);
	} else {
		if (unlikely(inode->i_blocks == 0)) {
			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
				  inode->i_ino,
				  (unsigned long long)inode->i_blocks);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			return;
		}
		f2fs_i_blocks_write(inode, 1, false, true);
	}
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_node_count;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_inc(&sbi->total_valid_inode_count);
}

static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_dec(&sbi->total_valid_inode_count);
}

static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{
	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}

static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
						pgoff_t index, bool for_write)
{
	struct page *page;

	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
		if (!for_write)
			page = find_get_page_flags(mapping, index,
							FGP_LOCK | FGP_ACCESSED);
		else
			page = find_lock_page(mapping, index);
		if (page)
			return page;

		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
			f2fs_show_injection_info(F2FS_M_SB(mapping),
							FAULT_PAGE_ALLOC);
			return NULL;
		}
	}

	if (!for_write)
		return grab_cache_page(mapping, index);
	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
}

static inline struct page *f2fs_pagecache_get_page(
				struct address_space *mapping, pgoff_t index,
				int fgp_flags, gfp_t gfp_mask)
{
	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
		f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
		return NULL;
	}

	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
}

static inline void f2fs_copy_page(struct page *src, struct page *dst)
{
	char *src_kaddr = kmap(src);
	char *dst_kaddr = kmap(dst);

	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
	kunmap(dst);
	kunmap(src);
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page)
		return;

	if (unlock) {
		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
		unlock_page(page);
	}
	put_page(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}
  2300. static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
  2301. size_t size)
  2302. {
  2303. return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
  2304. }
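
/*
 * Slab allocation for objects the caller cannot afford to lose: if the
 * first attempt fails, retry with __GFP_NOFAIL so the result is never NULL.
 */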
static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
						gfp_t flags)
{
	void *entry;

	entry = kmem_cache_alloc(cachep, flags);
	if (!entry)
		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
	return entry;
}
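
/*
 * is_inflight_io() reports whether any read, writeback, direct-I/O,
 * queued-discard or queued-flush requests are outstanding; is_idle()
 * layers the GC urgency modes and the per-type idle timers on top of it.
 */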
static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
{
	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
		get_pages(sbi, F2FS_WB_CP_DATA) ||
		get_pages(sbi, F2FS_DIO_READ) ||
		get_pages(sbi, F2FS_DIO_WRITE))
		return true;

	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
		return true;

	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
		return true;
	return false;
}

static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;

	if (is_inflight_io(sbi, type))
		return false;

	if (sbi->gc_mode == GC_URGENT_LOW &&
			(type == DISCARD_TIME || type == GC_TIME))
		return true;

	return f2fs_time_over(sbi, type);
}

static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
				unsigned long index, void *item)
{
	while (radix_tree_insert(root, index, item))
		cond_resched();
}

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = F2FS_NODE(page);

	return RAW_IS_INODE(p);
}

static inline int offset_in_addr(struct f2fs_inode *i)
{
	return (i->i_inline & F2FS_EXTRA_ATTR) ?
			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline int f2fs_has_extra_attr(struct inode *inode);
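
/*
 * Return the block address recorded at @offset of @node_page. For inode
 * pages the extra-attribute area shifts the start of i_addr[], so a base
 * offset is added first; a NULL @inode means the call comes from the GC
 * path, which takes the base straight from the raw inode.
 */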
static inline block_t data_blkaddr(struct inode *inode,
			struct page *node_page, unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	int base = 0;
	bool is_inode = IS_INODE(node_page);

	raw_node = F2FS_NODE(node_page);

	if (is_inode) {
		if (!inode)
			/* from GC path only */
			base = offset_in_addr(&raw_node->i);
		else if (f2fs_has_extra_attr(inode))
			base = get_extra_isize(inode);
	}

	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[base + offset]);
}

static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
{
	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
}
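
/*
 * Bitmap helpers for on-disk bitmaps. Unlike the generic kernel bitops,
 * these address bits MSB-first within each byte, matching the bit order
 * f2fs uses on disk.
 */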
static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline void f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr |= mask;
}

static inline void f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr &= ~mask;
}

static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}

static inline void f2fs_change_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr ^= mask;
}

/*
 * On-disk inode flags (f2fs_inode::i_flags)
 */
#define F2FS_COMPR_FL		0x00000004 /* Compress file */
#define F2FS_SYNC_FL		0x00000008 /* Synchronous updates */
#define F2FS_IMMUTABLE_FL	0x00000010 /* Immutable file */
#define F2FS_APPEND_FL		0x00000020 /* writes to file may only append */
#define F2FS_NODUMP_FL		0x00000040 /* do not dump file */
#define F2FS_NOATIME_FL		0x00000080 /* do not update atime */
#define F2FS_NOCOMP_FL		0x00000400 /* Don't compress */
#define F2FS_INDEX_FL		0x00001000 /* hash-indexed directory */
#define F2FS_DIRSYNC_FL		0x00010000 /* dirsync behaviour (directories only) */
#define F2FS_PROJINHERIT_FL	0x20000000 /* Create with parents projid */
#define F2FS_CASEFOLD_FL	0x40000000 /* Casefolded file */

/* Flags that should be inherited by new inodes from their parent. */
#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)

/* Flags that are appropriate for regular files (all but dir-specific ones). */
#define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
				F2FS_CASEFOLD_FL))

/* Flags that are appropriate for non-directories/regular files. */
#define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}
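
/*
 * Dirty the inode for flag changes that must reach the on-disk inode.
 * The inline and new-inode flags only dirty the inode when they are
 * cleared; the remaining flags listed here dirty it on both set and clear.
 */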
static inline void __mark_inode_dirty_flag(struct inode *inode,
						int flag, bool set)
{
	switch (flag) {
	case FI_INLINE_XATTR:
	case FI_INLINE_DATA:
	case FI_INLINE_DENTRY:
	case FI_NEW_INODE:
		if (set)
			return;
		fallthrough;
	case FI_DATA_EXIST:
	case FI_INLINE_DOTS:
	case FI_PIN_FILE:
	case FI_COMPRESS_RELEASED:
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

static inline void set_inode_flag(struct inode *inode, int flag)
{
	set_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, true);
}

static inline int is_inode_flag_set(struct inode *inode, int flag)
{
	return test_bit(flag, F2FS_I(inode)->flags);
}

static inline void clear_inode_flag(struct inode *inode, int flag)
{
	clear_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, false);
}

static inline bool f2fs_verity_in_progress(struct inode *inode)
{
	return IS_ENABLED(CONFIG_FS_VERITY) &&
	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
}

static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
	F2FS_I(inode)->i_acl_mode = mode;
	set_inode_flag(inode, FI_ACL_MODE);
	f2fs_mark_inode_dirty_sync(inode, false);
}

static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
	if (inc)
		inc_nlink(inode);
	else
		drop_nlink(inode);

	f2fs_mark_inode_dirty_sync(inode, true);
}
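
/*
 * Update i_blocks together with the quota subsystem: adding with @claim
 * converts an earlier dquot_reserve_block() reservation, adding without
 * it charges quota directly (nofail), and removal frees quota. The inode
 * is then marked dirty, and FI_AUTO_RECOVER is set when the inode was
 * previously clean or already flagged for auto recovery.
 */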
static inline void f2fs_i_blocks_write(struct inode *inode,
					block_t diff, bool add, bool claim)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	/* add = 1, claim = 1 should be dquot_reserve_block in pair */
	if (add) {
		if (claim)
			dquot_claim_block(inode, diff);
		else
			dquot_alloc_block_nofail(inode, diff);
	} else {
		dquot_free_block(inode, diff);
	}

	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	if (i_size_read(inode) == i_size)
		return;

	i_size_write(inode, i_size);
	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
	F2FS_I(inode)->i_current_depth = depth;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_gc_failures_write(struct inode *inode,
					unsigned int count)
{
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
{
	F2FS_I(inode)->i_xattr_nid = xnid;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
{
	F2FS_I(inode)->i_pino = pino;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (ri->i_inline & F2FS_INLINE_XATTR)
		set_bit(FI_INLINE_XATTR, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DATA)
		set_bit(FI_INLINE_DATA, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DENTRY)
		set_bit(FI_INLINE_DENTRY, fi->flags);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_bit(FI_DATA_EXIST, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DOTS)
		set_bit(FI_INLINE_DOTS, fi->flags);
	if (ri->i_inline & F2FS_EXTRA_ATTR)
		set_bit(FI_EXTRA_ATTR, fi->flags);
	if (ri->i_inline & F2FS_PIN_FILE)
		set_bit(FI_PIN_FILE, fi->flags);
	if (ri->i_inline & F2FS_COMPRESS_RELEASED)
		set_bit(FI_COMPRESS_RELEASED, fi->flags);
}

static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
	ri->i_inline = 0;

	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
		ri->i_inline |= F2FS_INLINE_XATTR;
	if (is_inode_flag_set(inode, FI_INLINE_DATA))
		ri->i_inline |= F2FS_INLINE_DATA;
	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
		ri->i_inline |= F2FS_INLINE_DENTRY;
	if (is_inode_flag_set(inode, FI_DATA_EXIST))
		ri->i_inline |= F2FS_DATA_EXIST;
	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
		ri->i_inline |= F2FS_INLINE_DOTS;
	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
		ri->i_inline |= F2FS_EXTRA_ATTR;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		ri->i_inline |= F2FS_PIN_FILE;
	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		ri->i_inline |= F2FS_COMPRESS_RELEASED;
}

static inline int f2fs_has_extra_attr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
}

static inline int f2fs_has_inline_xattr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_XATTR);
}

static inline int f2fs_compressed_file(struct inode *inode)
{
	return S_ISREG(inode->i_mode) &&
		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
}

static inline bool f2fs_need_compress_data(struct inode *inode)
{
	int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;

	if (!f2fs_compressed_file(inode))
		return false;
	if (compress_mode == COMPR_MODE_FS)
		return true;
	else if (compress_mode == COMPR_MODE_USER &&
			is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
		return true;

	return false;
}
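
/*
 * Number of data block addresses usable in an inode or indirect node
 * block. For compressed files the count is rounded down to a multiple of
 * the cluster size, so a compression cluster never straddles a node block.
 */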
static inline unsigned int addrs_per_inode(struct inode *inode)
{
	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
				get_inline_xattr_addrs(inode);

	if (!f2fs_compressed_file(inode))
		return addrs;
	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
}

static inline unsigned int addrs_per_block(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return DEF_ADDRS_PER_BLOCK;
	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
}

static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);

	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode)]);
}

static inline int inline_xattr_size(struct inode *inode)
{
	if (f2fs_has_inline_xattr(inode))
		return get_inline_xattr_addrs(inode) * sizeof(__le32);
	return 0;
}

static inline int f2fs_has_inline_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DATA);
}

static inline int f2fs_exist_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DATA_EXIST);
}

static inline int f2fs_has_inline_dots(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DOTS);
}

static inline int f2fs_is_mmap_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_MMAP_FILE);
}

static inline bool f2fs_is_pinned_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_PIN_FILE);
}

static inline bool f2fs_is_atomic_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}

static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
}

static inline bool f2fs_is_volatile_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_VOLATILE_FILE);
}

static inline bool f2fs_is_first_block_written(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
}

static inline bool f2fs_is_drop_cache(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DROP_CACHE);
}

static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);
	int extra_size = get_extra_isize(inode);

	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
}

static inline int f2fs_has_inline_dentry(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
}

static inline int is_file(struct inode *inode, int type)
{
	return F2FS_I(inode)->i_advise & type;
}

static inline void set_file(struct inode *inode, int type)
{
	F2FS_I(inode)->i_advise |= type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void clear_file(struct inode *inode, int type)
{
	F2FS_I(inode)->i_advise &= ~type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline bool f2fs_is_time_consistent(struct inode *inode)
{
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
				&F2FS_I(inode)->i_crtime))
		return false;
	return true;
}
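
/*
 * Decide whether writing the inode can be skipped during sync. For
 * datasync it is enough that the inode is not on the dirty-metadata
 * list; otherwise the inode must be auto-recoverable, not forced to keep
 * its on-disk size, page-aligned in size, time-consistent, and its last
 * written disk size must still match.
 */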
static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
	bool ret;

	if (dsync) {
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		ret = list_empty(&F2FS_I(inode)->gdirty_list);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return ret;
	}
	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
			file_keep_isize(inode) ||
			i_size_read(inode) & ~PAGE_MASK)
		return false;

	if (!f2fs_is_time_consistent(inode))
		return false;

	spin_lock(&F2FS_I(inode)->i_size_lock);
	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
	spin_unlock(&F2FS_I(inode)->i_size_lock);

	return ret;
}

static inline bool f2fs_readonly(struct super_block *sb)
{
	return sb_rdonly(sb);
}

static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}

static inline bool is_dot_dotdot(const u8 *name, size_t len)
{
	if (len == 1 && name[0] == '.')
		return true;

	if (len == 2 && name[0] == '.' && name[1] == '.')
		return true;

	return false;
}

static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
		return NULL;
	}

	return kmalloc(size, flags);
}

static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
		return NULL;
	}

	return kvmalloc(size, flags);
}

static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline int get_extra_isize(struct inode *inode)
{
	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
}

static inline int get_inline_xattr_addrs(struct inode *inode)
{
	return F2FS_I(inode)->i_inline_xattr_size;
}

#define f2fs_get_inode_mode(i) \
	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))

#define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
	(offsetof(struct f2fs_inode, i_extra_end) -	\
	offsetof(struct f2fs_inode, i_extra_isize))

#define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))

#define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)	\
	((offsetof(typeof(*(f2fs_inode)), field) +		\
	sizeof((f2fs_inode)->field))				\
	<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))

#define DEFAULT_IOSTAT_PERIOD_MS	3000
#define MIN_IOSTAT_PERIOD_MS		100
/* maximum period of iostat tracing is 1 day */
#define MAX_IOSTAT_PERIOD_MS		8640000

static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
	int i;

	spin_lock(&sbi->iostat_lock);
	for (i = 0; i < NR_IO_TYPE; i++) {
		sbi->rw_iostat[i] = 0;
		sbi->prev_rw_iostat[i] = 0;
	}
	spin_unlock(&sbi->iostat_lock);
}

extern void f2fs_record_iostat(struct f2fs_sb_info *sbi);

static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
			enum iostat_type type, unsigned long long io_bytes)
{
	if (!sbi->iostat_enable)
		return;

	spin_lock(&sbi->iostat_lock);
	sbi->rw_iostat[type] += io_bytes;

	if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
		sbi->rw_iostat[APP_BUFFERED_IO] =
			sbi->rw_iostat[APP_WRITE_IO] -
			sbi->rw_iostat[APP_DIRECT_IO];

	if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
		sbi->rw_iostat[APP_BUFFERED_READ_IO] =
			sbi->rw_iostat[APP_READ_IO] -
			sbi->rw_iostat[APP_DIRECT_READ_IO];
	spin_unlock(&sbi->iostat_lock);

	f2fs_record_iostat(sbi);
}

#define __is_large_section(sbi)	((sbi)->segs_per_sec > 1)

#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
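
/*
 * Like f2fs_is_valid_blkaddr(), but treat an invalid address as a
 * filesystem bug: log the address and trip f2fs_bug_on().
 */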
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
			 blkaddr, type);
		f2fs_bug_on(sbi, 1);
	}
}

static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
			blkaddr == COMPRESS_ADDR)
		return false;
	return true;
}
/*
 * file.c
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(const struct path *path, struct kstat *stat,
			u32 request_mask, unsigned int flags);
int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
int f2fs_pin_file_control(struct inode *inode, bool inc);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *inode);
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_update_inode(struct inode *inode, struct page *node_page);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
void f2fs_handle_failed_inode(struct inode *inode);

/*
 * namei.c
 */
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
					bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
int f2fs_init_casefolded_name(const struct inode *dir,
				struct f2fs_filename *fname);
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
			int lookup, struct f2fs_filename *fname);
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
			struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
			const struct f2fs_filename *fname, int *max_slots);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
			unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
			struct f2fs_dentry_ptr *d);
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct f2fs_filename *fname, struct page *dpage);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
			unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
			const struct qstr *child, struct page **res_page);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
			struct page **page);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
			struct page *page, struct inode *inode);
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
			const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
			const struct fscrypt_str *name, f2fs_hash_t name_hash,
			unsigned int bit_pos);
int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
			struct inode *dir, struct inode *inode);
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
bool f2fs_empty_dir(struct inode *dir);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;
	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
				inode, inode->i_ino, inode->i_mode);
}
/*
 * super.c
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);

/*
 * hash.c
 */
void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);

/*
 * node.c
 */
struct node_info;

int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
			struct node_info *ni, bool checkpoint_context);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
					unsigned int seq_id);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
int f2fs_move_node_page(struct page *node_page, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
			struct writeback_control *wbc,
			bool do_balance, enum iostat_type io_type);
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_node_manager_caches(void);
void f2fs_destroy_node_manager_caches(void);
/*
 * segment.c
 */
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
void f2fs_register_inmem_page(struct inode *inode, struct page *page);
void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
void f2fs_drop_inmem_pages(struct inode *inode);
void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
int f2fs_commit_inmem_pages(struct inode *inode);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir);
void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
					unsigned int start, unsigned int end);
void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
					block_t blk_addr);
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
						enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
			struct f2fs_io_info *fio);
int f2fs_inplace_write_data(struct f2fs_io_info *fio);
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr,
			bool recover_curseg, bool recover_newaddr,
			bool from_gc);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
			block_t old_addr, block_t new_addr,
			unsigned char version, bool recover_curseg,
			bool recover_newaddr);
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type,
			struct f2fs_io_info *fio);
void f2fs_wait_on_page_writeback(struct page *page,
			enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
								block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
			unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
			enum page_type type, enum temp_type temp);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
			unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
			unsigned int segno);
/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
			int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);
int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
			block_t blk_addr, struct bio *bio);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			int op_flags, bool for_write);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
			bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio, sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks, bool allow_balance);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
			struct page *page, enum migrate_mode mode);
#endif
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
			unsigned int segno);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);

/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
int __init f2fs_create_recovery_cache(void);
void f2fs_destroy_recovery_cache(void);
/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_largest, hit_cached, hit_rbtree;
	unsigned long long hit_total, total_ext;
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	int inmem_pages;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
	unsigned int cur_ckpt_time, peak_ckpt_time;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode;
	unsigned long long compr_blocks;
	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages, compress_pages;
	int compress_page_hit;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	unsigned long long skipped_atomic_files[2];
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];
	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}
#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(si)		((si)->bg_gc++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_dec_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_inc_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_dec_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_inc_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_dec_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_add_compr_blocks(inode, blocks)				\
	(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks)				\
	(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_meta_count(sbi, blkaddr)				\
	do {								\
		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
			atomic_inc(&(sbi)->meta_count[META_CP]);	\
		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
	} while (0)
#define stat_inc_seg_type(sbi, curseg)					\
	((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
	((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi)					\
	(atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode)				\
	do {								\
		int cur = F2FS_I_SB(inode)->atomic_files;		\
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
	} while (0)
#define stat_inc_volatile_write(inode)					\
	(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
#define stat_dec_volatile_write(inode)					\
	(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
#define stat_update_max_volatile_write(inode)				\
	do {								\
		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		si->tot_segs++;						\
		if ((type) == SUM_TYPE_DATA) {				\
			si->data_segs++;				\
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
		} else {						\
			si->node_segs++;				\
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
		}							\
	} while (0)
#define stat_inc_tot_blk_count(si, blks)				\
	((si)->tot_blks += (blks))
#define stat_inc_data_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi)				do { } while (0)
#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_volatile_write(inode)			do { } while (0)
#define stat_dec_volatile_write(inode)			do { } while (0)
#define stat_update_max_volatile_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
#endif
extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
						struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
			struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
				struct page *page, struct inode *dir,
				struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
                        struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
                        struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
                        struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
                        struct rb_root_cached *root,
                        struct rb_node **parent,
                        unsigned long long key, bool *left_most);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
                        struct rb_root_cached *root,
                        struct rb_node **parent,
                        unsigned int ofs, bool *leftmost);
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
                        struct rb_entry *cached_re, unsigned int ofs,
                        struct rb_entry **prev_entry, struct rb_entry **next_entry,
                        struct rb_node ***insert_p, struct rb_node **insert_parent,
                        bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
                        struct rb_root_cached *root, bool check_key);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
                        struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
                        pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);

/*
 * sysfs.c
 */
int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
        return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
        file_set_encrypt(inode);
        f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
        return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
                f2fs_compressed_file(inode);
}
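
/*
 * Illustrative sketch (not part of the original header): a read path
 * would typically branch on this helper to decide whether completed
 * pages can be unlocked directly or must first go through a post-read
 * pipeline (decryption, fs-verity check, decompression):
 *
 *	if (f2fs_post_read_required(inode))
 *		... hand the bio to the post-read work queue ...
 *	else
 *		... unlock the page and mark it up to date ...
 */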

/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
                        struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
                        pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
void f2fs_end_read_compressed_page(struct page *page, bool failed,
                        block_t blkaddr);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
                        int *submitted,
                        struct writeback_control *wbc,
                        enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
                        unsigned nr_pages, sector_t *last_block_in_bio,
                        bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
void f2fs_put_page_dic(struct page *page);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
                        nid_t ino, block_t blkaddr);
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
                        block_t blkaddr);
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode) \
do { \
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
        sbi->compr_new_inode++; \
} while (0)
#define add_compr_block_stat(inode, blocks) \
do { \
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
        int diff = F2FS_I(inode)->i_cluster_size - blocks; \
        sbi->compr_written_block += blocks; \
        sbi->compr_saved_block += diff; \
} while (0)
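
/*
 * Worked example (illustrative, not part of the original header): for a
 * 16-block cluster that compresses down to 10 blocks,
 * add_compr_block_stat(inode, 10) computes diff = 16 - 10 = 6, so
 * compr_written_block grows by the 10 blocks actually written and
 * compr_saved_block by the 6 blocks saved by compression.
 */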
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
        if (!f2fs_compressed_file(inode))
                return true;
        /* compression is not supported in this configuration */
        return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
        WARN_ON_ONCE(1);
        return ERR_PTR(-EINVAL);
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
static inline void f2fs_end_read_compressed_page(struct page *page,
                        bool failed, block_t blkaddr)
{
        WARN_ON_ONCE(1);
}
static inline void f2fs_put_page_dic(struct page *page)
{
        WARN_ON_ONCE(1);
}
static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
                        block_t blkaddr) { }
static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
                        struct page *page, nid_t ino, block_t blkaddr) { }
static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
                        struct page *page, block_t blkaddr) { return false; }
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
                        nid_t ino) { }
#define inc_compr_inode_stat(inode) do { } while (0)
#endif /* CONFIG_F2FS_FS_COMPRESSION */

static inline void set_compress_context(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        F2FS_I(inode)->i_compress_algorithm =
                        F2FS_OPTION(sbi).compress_algorithm;
        F2FS_I(inode)->i_log_cluster_size =
                        F2FS_OPTION(sbi).compress_log_size;
        F2FS_I(inode)->i_compress_flag =
                        F2FS_OPTION(sbi).compress_chksum ?
                                1 << COMPRESS_CHKSUM : 0;
        F2FS_I(inode)->i_cluster_size =
                        1 << F2FS_I(inode)->i_log_cluster_size;
        if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
                        F2FS_OPTION(sbi).compress_level)
                F2FS_I(inode)->i_compress_flag |=
                                F2FS_OPTION(sbi).compress_level <<
                                COMPRESS_LEVEL_OFFSET;
        F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
        set_inode_flag(inode, FI_COMPRESSED_FILE);
        stat_inc_compr_inode(inode);
        inc_compr_inode_stat(inode);
        f2fs_mark_inode_dirty_sync(inode, true);
}
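
/*
 * Illustrative sketch of the resulting flag layout (not part of the
 * original header): with compress_chksum enabled and LZ4 configured at
 * compression level 9, i_compress_flag ends up as
 *
 *	(9 << COMPRESS_LEVEL_OFFSET) | (1 << COMPRESS_CHKSUM)
 *
 * i.e. the per-cluster checksum bit in the low bits and the algorithm
 * level packed above COMPRESS_LEVEL_OFFSET.
 */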

static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);

        if (!f2fs_compressed_file(inode))
                return true;
        if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
                return false;

        fi->i_flags &= ~F2FS_COMPR_FL;
        stat_dec_compr_inode(inode);
        clear_inode_flag(inode, FI_COMPRESSED_FILE);
        f2fs_mark_inode_dirty_sync(inode, true);
        return true;
}
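
/*
 * Usage sketch (illustrative, not part of the original header): a caller
 * that wants to drop the compression attribute, e.g. an ioctl clearing
 * F2FS_COMPR_FL, is expected to bail out when the helper fails because
 * a regular file already has allocated blocks:
 *
 *	if (!f2fs_disable_compressed_file(inode))
 *		return -EINVAL;
 */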

#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
        return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);
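
/*
 * For example, F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) above expands to
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 *
 * so feature checks read naturally at call sites, e.g.
 * "if (f2fs_sb_has_blkzoned(sbi))".
 */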

static inline bool f2fs_may_extent_tree(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        if (!test_opt(sbi, EXTENT_CACHE) ||
                        is_inode_flag_set(inode, FI_NO_EXTENT) ||
                        (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
                         !f2fs_sb_has_readonly(sbi)))
                return false;

        /*
         * do not create extents for files recovered during mount
         * if the shrinker has not been registered yet.
         */
        if (list_empty(&sbi->s_list))
                return false;

        return S_ISREG(inode->i_mode);
}
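
/*
 * Usage sketch (illustrative, not part of the original header): extent
 * tree setup paths such as f2fs_init_extent_tree() would consult this
 * gate first, so only regular files on an extent-cache-enabled mount
 * ever get an extent tree:
 *
 *	if (!f2fs_may_extent_tree(inode))
 *		return;	skip building the extent tree for this inode
 */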

#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
                                        block_t blkaddr)
{
        unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

        return test_bit(zno, FDEV(devi).blkz_seq);
}
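
/*
 * Worked example (illustrative, not part of the original header): with
 * log_blocks_per_blkz = 16, i.e. 65536 blocks per zone, block address
 * 0x12345 maps to zone number 0x12345 >> 16 = 1, and the helper simply
 * tests that zone's bit in the device's blkz_seq bitmap of
 * sequential-write-required zones.
 */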
#endif /* CONFIG_BLK_DEV_ZONED */

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
        return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
        return blk_queue_discard(bdev_get_queue(bdev)) ||
               bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
        int i;

        if (!f2fs_is_multi_device(sbi))
                return f2fs_bdev_support_discard(sbi->sb->s_bdev);

        for (i = 0; i < sbi->s_ndevs; i++)
                if (f2fs_bdev_support_discard(FDEV(i).bdev))
                        return true;
        return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
        return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
                f2fs_hw_should_discard(sbi);
}

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
        int i;

        if (!f2fs_is_multi_device(sbi))
                return bdev_read_only(sbi->sb->s_bdev);

        for (i = 0; i < sbi->s_ndevs; i++)
                if (bdev_read_only(FDEV(i).bdev))
                        return true;
        return false;
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
        return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
        if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
                        f2fs_is_atomic_file(inode) ||
                        f2fs_is_volatile_file(inode))
                return false;
        return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
                                                u64 blocks, bool add)
{
        int diff = F2FS_I(inode)->i_cluster_size - blocks;
        struct f2fs_inode_info *fi = F2FS_I(inode);

        /* don't update i_compr_blocks if saved blocks were released */
        if (!add && !atomic_read(&fi->i_compr_blocks))
                return;

        if (add) {
                atomic_add(diff, &fi->i_compr_blocks);
                stat_add_compr_blocks(inode, diff);
        } else {
                atomic_sub(diff, &fi->i_compr_blocks);
                stat_sub_compr_blocks(inode, diff);
        }
        f2fs_mark_inode_dirty_sync(inode, true);
}
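
/*
 * Worked example (illustrative, not part of the original header): for a
 * 16-block cluster written back as 10 compressed blocks,
 * f2fs_i_compr_blocks_update(inode, 10, true) computes
 * diff = 16 - 10 = 6 and adds those 6 saved blocks to i_compr_blocks;
 * the matching call with add == false subtracts them again when the
 * saved blocks are released.
 */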

static inline int block_unaligned_IO(struct inode *inode,
                        struct kiocb *iocb, struct iov_iter *iter)
{
        unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
        unsigned int blocksize_mask = (1 << i_blkbits) - 1;
        loff_t offset = iocb->ki_pos;
        unsigned long align = offset | iov_iter_alignment(iter);

        return align & blocksize_mask;
}
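
/*
 * Worked example (illustrative, not part of the original header): with
 * 4KiB blocks, i_blkbits = 12 gives blocksize_mask = 0xfff. An I/O at
 * offset 8192 with a page-aligned buffer yields align & 0xfff == 0
 * (aligned), while an I/O at offset 512 yields 512 (unaligned);
 * iov_iter_alignment() ORs in any misaligned buffer address or length
 * bits as well.
 */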

static inline int allow_outplace_dio(struct inode *inode,
                        struct kiocb *iocb, struct iov_iter *iter)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int rw = iov_iter_rw(iter);

        return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
                !block_unaligned_IO(inode, iocb, iter));
}

static inline bool f2fs_force_buffered_io(struct inode *inode,
                        struct kiocb *iocb, struct iov_iter *iter)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int rw = iov_iter_rw(iter);

        if (!fscrypt_dio_supported(iocb, iter))
                return true;
        if (fsverity_active(inode))
                return true;
        if (f2fs_compressed_file(inode))
                return true;
        if (f2fs_is_multi_device(sbi))
                return true;
        /*
         * on a zoned block device, fall back from direct I/O to buffered
         * I/O so that all I/O can be serialized by the log-structured
         * write path.
         */
        if (f2fs_sb_has_blkzoned(sbi))
                return true;
        if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
                if (block_unaligned_IO(inode, iocb, iter))
                        return true;
                if (F2FS_IO_ALIGNED(sbi))
                        return true;
        }
        if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
                return true;

        return false;
}
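
/*
 * Usage sketch (illustrative, not part of the original header): a
 * direct-I/O entry point would consult this helper first and silently
 * take the buffered path whenever any of the conditions above hold:
 *
 *	if ((iocb->ki_flags & IOCB_DIRECT) &&
 *	    f2fs_force_buffered_io(inode, iocb, iter))
 *		... fall back to buffered I/O ...
 */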

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
        return fsverity_active(inode) &&
               idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
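
/*
 * Worked example (illustrative, not part of the original header): with
 * PAGE_SIZE = 4096 and i_size = 10000, DIV_ROUND_UP gives 3 pages, so
 * page indexes 0..2 need verification while any index >= 3 lies beyond
 * EOF and is skipped.
 */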

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
                                                        unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type) do { } while (0)
#endif /* CONFIG_F2FS_FAULT_INJECTION */

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
        if (f2fs_sb_has_quota_ino(sbi))
                return true;
        if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
                F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
                F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
                return true;
#endif /* CONFIG_QUOTA */
        return false;
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */