// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/zstd.h>
#include <linux/lz4.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_READ_IO]		= "read IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
	[FAULT_WRITE_IO]	= "write IO error",
};

void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
	}

	if (type)
		ffi->inject_type = type;

	if (!rate && !type)
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif
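
/*
 * Illustrative usage (an assumption for clarity, not part of the original
 * source): with CONFIG_F2FS_FAULT_INJECTION enabled, mounting with
 *
 *	mount -t f2fs -o fault_injection=8,fault_type=0x7f /dev/sdb1 /mnt
 *
 * sets inject_rate = 8 (roughly one injected failure per 8 tracked
 * operations) and limits injection to the fault types whose bits are set
 * in 0x7f, indexing into f2fs_fault_name[] above. Passing rate == 0 and
 * type == 0 clears the whole f2fs_fault_info, disabling injection.
 */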

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_whint,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_checkpoint_merge,
	Opt_nocheckpoint_merge,
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_compress_extension,
	Opt_compress_chksum,
	Opt_compress_mode,
	Opt_compress_cache,
	Opt_atgc,
	Opt_gc_merge,
	Opt_nogc_merge,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
	{Opt_checkpoint_merge, "checkpoint_merge"},
	{Opt_nocheckpoint_merge, "nocheckpoint_merge"},
	{Opt_compress_algorithm, "compress_algorithm=%s"},
	{Opt_compress_log_size, "compress_log_size=%u"},
	{Opt_compress_extension, "compress_extension=%s"},
	{Opt_compress_chksum, "compress_chksum"},
	{Opt_compress_mode, "compress_mode=%s"},
	{Opt_compress_cache, "compress_cache"},
	{Opt_atgc, "atgc"},
	{Opt_gc_merge, "gc_merge"},
	{Opt_nogc_merge, "nogc_merge"},
	{Opt_err, NULL},
};
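
/*
 * Illustrative mount line (an assumption, for reference only) exercising
 * several of the tokens above; parse_options() below splits the option
 * string on ',' and matches each piece against f2fs_tokens:
 *
 *	mount -t f2fs -o background_gc=on,discard,no_heap,\
 *		compress_algorithm=lz4,compress_log_size=2 /dev/sdb1 /mnt
 */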

void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;
	printk("%c%cF2FS-fs (%s): %pV\n",
	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

	va_end(args);
}

#ifdef CONFIG_UNICODE
static const struct f2fs_sb_encodings {
	__u16 magic;
	char *name;
	char *version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
};

static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
				 const struct f2fs_sb_encodings **encoding,
				 __u16 *flags)
{
	__u16 magic = le16_to_cpu(sb->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
		if (magic == f2fs_sb_encoding_map[i].magic)
			break;

	if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
		return -EINVAL;

	*encoding = &f2fs_sb_encoding_map[i];
	*flags = le16_to_cpu(sb->s_encoding_flags);

	return 0;
}

struct kmem_cache *f2fs_cf_name_slab;
static int __init f2fs_create_casefold_cache(void)
{
	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
							F2FS_NAME_LEN);
	if (!f2fs_cf_name_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_casefold_cache(void)
{
	kmem_cache_destroy(f2fs_cf_name_slab);
}
#else
static int __init f2fs_create_casefold_cache(void) { return 0; }
static void f2fs_destroy_casefold_cache(void) { }
#endif

static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = min((sbi->user_block_count << 1) / 1000,
			sbi->user_block_count - sbi->reserved_blocks);

	/* limit is 0.2% */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
}
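
/*
 * Worked example (illustrative numbers, not from the source): for a
 * filesystem with user_block_count = 1,000,000 and reserved_blocks = 0,
 * limit = min((1,000,000 << 1) / 1000, 1,000,000) = 2,000 blocks, i.e.
 * 0.2% of the user-visible space, so a reserve_root=5000 mount option
 * would be clamped down to 2,000 blocks here.
 */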

static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
{
	unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
	unsigned int avg_vblocks;
	unsigned int wanted_reserved_segments;
	block_t avail_user_block_count;

	if (!F2FS_IO_ALIGNED(sbi))
		return 0;

	/* average valid block count in section in worst case */
	avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);

	/*
	 * we need enough free space when migrating one section in worst case
	 */
	wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
						reserved_segments(sbi);
	wanted_reserved_segments -= reserved_segments(sbi);

	avail_user_block_count = sbi->user_block_count -
				sbi->current_reserved_blocks -
				F2FS_OPTION(sbi).root_reserved_blocks;

	if (wanted_reserved_segments * sbi->blocks_per_seg >
					avail_user_block_count) {
		f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
			wanted_reserved_segments,
			avail_user_block_count >> sbi->log_blocks_per_seg);
		return -ENOSPC;
	}

	SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;

	f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
		 wanted_reserved_segments);
	return 0;
}
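
/*
 * Illustrative arithmetic (assumed values): with blocks_per_seg = 512,
 * segs_per_sec = 1 and an aligned IO size of 64 blocks, sec_blks = 512
 * and avg_vblocks = 512 / 64 = 8. If reserved_segments() is 100, the
 * worst-case migration wants (64 / 8) * 100 - 100 = 700 additional
 * reserved segments, which must still fit in avail_user_block_count.
 */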

static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
	if (!F2FS_OPTION(sbi).unusable_cap_perc)
		return;

	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
	else
		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
					F2FS_OPTION(sbi).unusable_cap_perc;

	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
			F2FS_OPTION(sbi).unusable_cap,
			F2FS_OPTION(sbi).unusable_cap_perc);
}
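
/*
 * Example (illustrative): mounting with checkpoint=disable:30%% on a
 * filesystem with user_block_count = 1,000,000 yields
 * unusable_cap = (1,000,000 / 100) * 30 = 300,000 blocks. The special
 * case for 100 avoids the rounding loss of the divide-then-multiply.
 */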

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_err(sbi, "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
		goto errout;
	}

	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}

static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_err(sbi, "old and new quota format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_err(sbi, "journaled quota format not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
#endif
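
/*
 * Illustrative option mixes (assumed, for clarity): mounting with
 * "usrjquota=aquota.user,jqfmt=vfsv0" selects old-style journaled quota,
 * while "usrquota" alone relies on the QUOTA_INO feature. Combining
 * "usrjquota=aquota.user" with "grpquota" (and no grpjquota=) leaves
 * GRPQUOTA set after the pair-clearing above, trips the "old and new
 * quota format mixing" check, and fails the mount.
 */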

static int f2fs_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
#ifdef CONFIG_FS_ENCRYPTION
	int err;

	if (!f2fs_sb_has_encrypt(sbi)) {
		f2fs_err(sbi, "Encrypt feature is off");
		return -EINVAL;
	}

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount. We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !F2FS_OPTION(sbi).dummy_enc_policy.policy) {
		f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
		return -EINVAL;
	}
	err = fscrypt_set_test_dummy_encryption(
		sb, arg->from, &F2FS_OPTION(sbi).dummy_enc_policy);
	if (err) {
		if (err == -EEXIST)
			f2fs_warn(sbi,
				  "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
				  opt);
		else
			f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
				  opt, err);
		return -EINVAL;
	}
	f2fs_warn(sbi, "Test dummy encryption mode enabled");
#else
	f2fs_warn(sbi, "Test dummy encryption mount option ignored");
#endif
	return 0;
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
#ifdef CONFIG_F2FS_FS_LZ4
static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
	unsigned int level;
#endif

	if (strlen(str) == 3) {
		F2FS_OPTION(sbi).compress_level = 0;
		return 0;
	}

#ifdef CONFIG_F2FS_FS_LZ4HC
	str += 3;

	if (str[0] != ':') {
		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
	if (kstrtouint(str + 1, 10, &level))
		return -EINVAL;

	if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
		f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
		return -EINVAL;
	}

	F2FS_OPTION(sbi).compress_level = level;
	return 0;
#else
	f2fs_info(sbi, "kernel doesn't support lz4hc compression");
	return -EINVAL;
#endif
}
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
{
	unsigned int level;
	int len = 4;

	if (strlen(str) == len) {
		F2FS_OPTION(sbi).compress_level = 0;
		return 0;
	}

	str += len;

	if (str[0] != ':') {
		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
	if (kstrtouint(str + 1, 10, &level))
		return -EINVAL;

	if (!level || level > ZSTD_maxCLevel()) {
		f2fs_info(sbi, "invalid zstd compress level: %d", level);
		return -EINVAL;
	}

	F2FS_OPTION(sbi).compress_level = level;
	return 0;
}
#endif
#endif
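
/*
 * Parsing walkthrough (illustrative): for compress_algorithm=lz4:9 the
 * lz4 parser above receives str = "lz4:9"; strlen() != 3 so it is not
 * plain "lz4", str += 3 leaves ":9", and kstrtouint() yields level = 9,
 * which must fall within [LZ4HC_MIN_CLEVEL, LZ4HC_MAX_CLEVEL]. The zstd
 * variant works the same way with its 4-byte "zstd" prefix and levels
 * 1..ZSTD_maxCLevel().
 */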

static int parse_options(struct super_block *sb, char *options, bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	int ext_cnt;
#endif
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
	int ret;

	if (!options)
		goto default_check;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "on")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
			} else if (!strcmp(name, "off")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
			} else if (!strcmp(name, "sync")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, NORECOVERY);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			set_opt(sbi, DISCARD);
			break;
		case Opt_nodiscard:
			if (f2fs_sb_has_blkzoned(sbi)) {
				f2fs_warn(sbi, "discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_info(sbi, "user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_info(sbi, "nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_info(sbi, "inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_info(sbi, "noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_info(sbi, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_info(sbi, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 &&
				arg != NR_CURSEG_PERSIST_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_info(sbi, "Preserve previous reserve_root=%u",
					  F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_err(sbi, "Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_err(sbi, "Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "adaptive")) {
				if (f2fs_sb_has_blkzoned(sbi)) {
					f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
			} else if (!strcmp(name, "lfs")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_warn(sbi, "Not support %d, larger than %d",
					  1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).write_io_size_bits = arg;
			break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);
			break;
		case Opt_fault_type:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, 0, arg);
			set_opt(sbi, FAULT_INJECTION);
			break;
#else
		case Opt_fault_injection:
			f2fs_info(sbi, "fault_injection options not supported");
			break;
		case Opt_fault_type:
			f2fs_info(sbi, "fault_type options not supported");
			break;
#endif
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_info(sbi, "quota operations not supported");
			break;
#endif
		case Opt_whint:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "user-based")) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
			} else if (!strcmp(name, "off")) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
			} else if (!strcmp(name, "fs-based")) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			if (!strcmp(name, "default")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (!strcmp(name, "reuse")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "posix")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (!strcmp(name, "strict")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (!strcmp(name, "nobarrier")) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_test_dummy_encryption:
			ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
							     is_remount);
			if (ret)
				return ret;
			break;
		case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
			sb->s_flags |= SB_INLINECRYPT;
#else
			f2fs_info(sbi, "inline encryption not supported");
#endif
			break;
		case Opt_checkpoint_disable_cap_perc:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < 0 || arg > 100)
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap_perc = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable_cap:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable:
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_enable:
			clear_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_merge:
			set_opt(sbi, MERGE_CHECKPOINT);
			break;
		case Opt_nocheckpoint_merge:
			clear_opt(sbi, MERGE_CHECKPOINT);
			break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		case Opt_compress_algorithm:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "lzo")) {
#ifdef CONFIG_F2FS_FS_LZO
				F2FS_OPTION(sbi).compress_level = 0;
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZO;
#else
				f2fs_info(sbi, "kernel doesn't support lzo compression");
#endif
			} else if (!strncmp(name, "lz4", 3)) {
#ifdef CONFIG_F2FS_FS_LZ4
				ret = f2fs_set_lz4hc_level(sbi, name);
				if (ret) {
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZ4;
#else
				f2fs_info(sbi, "kernel doesn't support lz4 compression");
#endif
			} else if (!strncmp(name, "zstd", 4)) {
#ifdef CONFIG_F2FS_FS_ZSTD
				ret = f2fs_set_zstd_level(sbi, name);
				if (ret) {
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_ZSTD;
#else
				f2fs_info(sbi, "kernel doesn't support zstd compression");
#endif
			} else if (!strcmp(name, "lzo-rle")) {
#ifdef CONFIG_F2FS_FS_LZORLE
				F2FS_OPTION(sbi).compress_level = 0;
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZORLE;
#else
				f2fs_info(sbi, "kernel doesn't support lzorle compression");
#endif
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_log_size:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < MIN_COMPRESS_LOG_SIZE ||
				arg > MAX_COMPRESS_LOG_SIZE) {
				f2fs_err(sbi,
					"Compress cluster log size is out of range");
				return -EINVAL;
			}
			F2FS_OPTION(sbi).compress_log_size = arg;
			break;
		case Opt_compress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			ext = F2FS_OPTION(sbi).extensions;
			ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
				ext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");
				kfree(name);
				return -EINVAL;
			}

			strcpy(ext[ext_cnt], name);
			F2FS_OPTION(sbi).compress_ext_cnt++;
			kfree(name);
			break;
		case Opt_compress_chksum:
			F2FS_OPTION(sbi).compress_chksum = true;
			break;
		case Opt_compress_mode:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "fs")) {
				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
			} else if (!strcmp(name, "user")) {
				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_cache:
			set_opt(sbi, COMPRESS_CACHE);
			break;
#else
		case Opt_compress_algorithm:
		case Opt_compress_log_size:
		case Opt_compress_extension:
		case Opt_compress_chksum:
		case Opt_compress_mode:
		case Opt_compress_cache:
			f2fs_info(sbi, "compression options not supported");
			break;
#endif
		case Opt_atgc:
			set_opt(sbi, ATGC);
			break;
		case Opt_gc_merge:
			set_opt(sbi, GC_MERGE);
			break;
		case Opt_nogc_merge:
			clear_opt(sbi, GC_MERGE);
			break;
		default:
			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
				 p);
			return -EINVAL;
		}
	}
default_check:
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#else
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
#endif
#ifndef CONFIG_UNICODE
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi)) {
		f2fs_err(sbi, "Zoned block device support is not enabled");
		return -EINVAL;
	}
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
			 F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		int min_size, max_size;

		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
			return -EINVAL;
		}

		min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
		max_size = MAX_INLINE_XATTR_SIZE;

		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
			f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
				 min_size, max_size);
			return -EINVAL;
		}
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
		return -EINVAL;
	}

	/*
	 * Do not pass down write hints if the number of active logs is less
	 * than NR_CURSEG_PERSIST_TYPE.
	 */
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;

	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Allow to mount readonly mode only");
		return -EROFS;
	}
	return 0;
}
  1150. static struct inode *f2fs_alloc_inode(struct super_block *sb)
  1151. {
  1152. struct f2fs_inode_info *fi;
  1153. fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
  1154. if (!fi)
  1155. return NULL;
  1156. init_once((void *) fi);
  1157. /* Initialize f2fs-specific inode info */
  1158. atomic_set(&fi->dirty_pages, 0);
  1159. atomic_set(&fi->i_compr_blocks, 0);
  1160. init_f2fs_rwsem(&fi->i_sem);
  1161. spin_lock_init(&fi->i_size_lock);
  1162. INIT_LIST_HEAD(&fi->dirty_list);
  1163. INIT_LIST_HEAD(&fi->gdirty_list);
  1164. INIT_LIST_HEAD(&fi->inmem_ilist);
  1165. INIT_LIST_HEAD(&fi->inmem_pages);
  1166. mutex_init(&fi->inmem_lock);
  1167. init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
  1168. init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
  1169. init_f2fs_rwsem(&fi->i_mmap_sem);
  1170. init_f2fs_rwsem(&fi->i_xattr_sem);
  1171. /* Will be used by directory only */
  1172. fi->i_dir_level = F2FS_SB(sb)->dir_level;
  1173. return &fi->vfs_inode;
  1174. }
static int f2fs_drop_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);
			return 1;
		}
	}

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				f2fs_drop_inmem_pages(inode);

			/* fi->extent_tree should be kept for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	if (!ret)
		ret = fscrypt_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
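
/*
 * Mark an inode's metadata dirty. Returns 1 if the inode was already
 * dirty, 0 otherwise. With @sync set, the inode is also queued on the
 * global DIRTY_META list so the checkpoint path can write it back.
 */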
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}

static void f2fs_free_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kvfree(FDEV(i).blkz_seq);
		kfree(FDEV(i).zone_capacity_blocks);
#endif
	}
	kvfree(sbi->devs);
}
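
/*
 * Umount teardown. The ordering matters: sysfs/proc entries go first to
 * avoid races with userspace, quota is turned off before the final
 * checkpoint, and node/meta inodes are dropped only after all dirty
 * pages have been flushed and waited on.
 */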
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	/* unregister procfs/sysfs entries in advance to avoid race case */
	f2fs_unregister_sysfs(sbi);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * flush all issued checkpoints and stop checkpoint issue thread.
	 * after then, all checkpoints should be done by each process context.
	 */
	f2fs_stop_ckpt_thread(sbi);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need
	 * to do a clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_issue_discard_timeout(sbi);

	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
					!sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * Normally the superblock is clean, so we need to release the ino
	 * entries here. In addition, an EIO error skips the checkpoint,
	 * so we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	f2fs_destroy_compress_inode(sbi);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	f2fs_destroy_post_read_wq(sbi);

	kvfree(sbi->ckpt);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	f2fs_destroy_page_array_cache(sbi);
	f2fs_destroy_xattr_caches(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
#ifdef CONFIG_UNICODE
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync)
		err = f2fs_issue_checkpoint(sbi);

	return err;
}

static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;

	/* ensure no checkpoint required */
	if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

#ifdef CONFIG_QUOTA
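/*
 * Clamp statfs results to the project quota limits, so a directory tree
 * under project quota reports the quota-limited capacity and usage
 * rather than those of the whole filesystem.
 */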
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
					dquot->dq_dqb.dqb_bhardlimit);
	if (limit)
		limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
					dquot->dq_dqb.dqb_ihardlimit);

	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	spin_lock(&sbi->stat_lock);
	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid = u64_to_fsid(id);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}
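
/*
 * Helpers for ->show_options. Each appends mount options in the same
 * ",option[=value]" form that /proc/mounts and mount(8) expect.
 */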
static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		char *fmtname = "";

		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
static inline void f2fs_show_compress_options(struct seq_file *seq,
					      struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *algtype = "";
	int i;

	if (!f2fs_sb_has_compression(sbi))
		return;

	switch (F2FS_OPTION(sbi).compress_algorithm) {
	case COMPRESS_LZO:
		algtype = "lzo";
		break;
	case COMPRESS_LZ4:
		algtype = "lz4";
		break;
	case COMPRESS_ZSTD:
		algtype = "zstd";
		break;
	case COMPRESS_LZORLE:
		algtype = "lzo-rle";
		break;
	}
	seq_printf(seq, ",compress_algorithm=%s", algtype);

	if (F2FS_OPTION(sbi).compress_level)
		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);

	seq_printf(seq, ",compress_log_size=%u",
			F2FS_OPTION(sbi).compress_log_size);

	for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
		seq_printf(seq, ",compress_extension=%s",
			F2FS_OPTION(sbi).extensions[i]);
	}

	if (F2FS_OPTION(sbi).compress_chksum)
		seq_puts(seq, ",compress_chksum");

	if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
		seq_printf(seq, ",compress_mode=%s", "fs");
	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
		seq_printf(seq, ",compress_mode=%s", "user");

	if (test_opt(sbi, COMPRESS_CACHE))
		seq_puts(seq, ",compress_cache");
}
#endif
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
		seq_printf(seq, ",background_gc=%s", "sync");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
		seq_printf(seq, ",background_gc=%s", "on");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
		seq_printf(seq, ",background_gc=%s", "off");

	if (test_opt(sbi, GC_MERGE))
		seq_puts(seq, ",gc_merge");

	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	else
		seq_puts(seq, ",nodiscard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
		seq_puts(seq, "adaptive");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_bits=%u",
				F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
		seq_printf(seq, ",whint_mode=%s", "user-based");
	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
		seq_printf(seq, ",whint_mode=%s", "fs-based");

	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);

	if (sbi->sb->s_flags & SB_INLINECRYPT)
		seq_puts(seq, ",inlinecrypt");

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		seq_printf(seq, ",checkpoint=disable:%u",
				F2FS_OPTION(sbi).unusable_cap);
	if (test_opt(sbi, MERGE_CHECKPOINT))
		seq_puts(seq, ",checkpoint_merge");
	else
		seq_puts(seq, ",nocheckpoint_merge");
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");

#ifdef CONFIG_F2FS_FS_COMPRESSION
	f2fs_show_compress_options(seq, sbi->sb);
#endif

	if (test_opt(sbi, ATGC))
		seq_puts(seq, ",atgc");
	return 0;
}
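
/*
 * Reset sbi->mount_opt to the built-in defaults. Called before option
 * parsing on both mount and remount, so any option not given on the
 * command line falls back to these values.
 */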
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	if (f2fs_sb_has_readonly(sbi))
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
	else
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;

	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
	F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
	F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
	F2FS_OPTION(sbi).compress_ext_cnt = 0;
	F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;

	sbi->sb->s_flags &= ~SB_INLINECRYPT;

	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	clear_opt(sbi, DISABLE_CHECKPOINT);
	set_opt(sbi, MERGE_CHECKPOINT);
	F2FS_OPTION(sbi).unusable_cap = 0;
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	set_opt(sbi, DISCARD);
	if (f2fs_sb_has_blkzoned(sbi))
		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
	else
		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

	f2fs_build_fault_attr(sbi, 0, 0);
}
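
/*
 * checkpoint=disable support: garbage-collect until the DISABLE_TIME
 * budget expires, sync everything, verify that the unusable (dirtied
 * but unreclaimable) block count stays under the configured cap, then
 * write a CP_PAUSE checkpoint and flag the sb so no further
 * checkpoints run.
 */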
#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif

static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
	unsigned int s_flags = sbi->sb->s_flags;
	struct cp_control cpc;
	int err = 0;
	int ret;
	block_t unusable;

	if (s_flags & SB_RDONLY) {
		f2fs_err(sbi, "checkpoint=disable on readonly fs");
		return -EINVAL;
	}
	sbi->sb->s_flags |= SB_ACTIVE;

	f2fs_update_time(sbi, DISABLE_TIME);

	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
		f2fs_down_write(&sbi->gc_lock);
		err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
		if (err == -ENODATA) {
			err = 0;
			break;
		}
		if (err && err != -EAGAIN)
			break;
	}

	ret = sync_filesystem(sbi->sb);
	if (ret || err) {
		err = ret ? ret : err;
		goto restore_flag;
	}

	unusable = f2fs_get_unusable_blocks(sbi);
	if (f2fs_disable_cp_again(sbi, unusable)) {
		err = -EAGAIN;
		goto restore_flag;
	}

	f2fs_down_write(&sbi->gc_lock);
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out_unlock;

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = unusable;
	spin_unlock(&sbi->stat_lock);

out_unlock:
	f2fs_up_write(&sbi->gc_lock);
restore_flag:
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
	return err;
}

static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
	int retry = DEFAULT_RETRY_IO_COUNT;

	/* we should flush all the data to keep data consistency */
	do {
		sync_inodes_sb(sbi->sb);
		cond_resched();
		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
	} while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);

	if (unlikely(retry < 0))
		f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");

	f2fs_down_write(&sbi->gc_lock);
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	f2fs_up_write(&sbi->gc_lock);

	f2fs_sync_fs(sbi->sb, 1);
}
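
/*
 * Remount re-parses the options from scratch against the defaults, then
 * reconciles the running threads (GC, checkpoint, flush) with the new
 * state. The restore_* labels unwind in reverse order on failure so the
 * old mount options and threads come back intact.
 */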
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err;
	bool need_restart_gc = false, need_stop_gc = false;
	bool need_restart_ckpt = false, need_stop_ckpt = false;
	bool need_restart_flush = false, need_stop_flush = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
	bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
	bool no_atgc = !test_opt(sbi, ATGC);
	bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
	bool checkpoint_changed;
#ifdef CONFIG_QUOTA
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(org_mount_opt.s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			org_mount_opt.s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
			  err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data, true);
	if (err)
		goto restore_opts;
	checkpoint_changed =
			disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);

	/*
	 * Both the previous and the new state of the filesystem are RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

	if (f2fs_sb_has_readonly(sbi) && !(*flags & SB_RDONLY)) {
		err = -EROFS;
		goto restore_opts;
	}

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sbi)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	/* disallow enable atgc dynamically */
	if (no_atgc == !!test_opt(sbi, ATGC)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch atgc option is not allowed");
		goto restore_opts;
	}

	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch extent_cache option is not allowed");
		goto restore_opts;
	}

	if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch io_bits option is not allowed");
		goto restore_opts;
	}

	if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch compress_cache option is not allowed");
		goto restore_opts;
	}

	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = -EINVAL;
		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) ||
			(F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
			!test_opt(sbi, GC_MERGE))) {
		if (sbi->gc_thread) {
			f2fs_stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY ||
		F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
			!test_opt(sbi, MERGE_CHECKPOINT)) {
		f2fs_stop_ckpt_thread(sbi);
		need_restart_ckpt = true;
	} else {
		err = f2fs_start_ckpt_thread(sbi);
		if (err) {
			f2fs_err(sbi,
			    "Failed to start F2FS issue_checkpoint_thread (%d)",
			    err);
			goto restore_gc;
		}
		need_stop_ckpt = true;
	}

	/*
	 * We stop issue flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
		need_restart_flush = true;
	} else {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			goto restore_ckpt;
		need_stop_flush = true;
	}

	if (checkpoint_changed) {
		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
			err = f2fs_disable_checkpoint(sbi);
			if (err)
				goto restore_flush;
		} else {
			f2fs_enable_checkpoint(sbi);
		}
	}
skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(org_mount_opt.s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);
	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	return 0;
restore_flush:
	if (need_restart_flush) {
		if (f2fs_create_flush_cmd_control(sbi))
			f2fs_warn(sbi, "background flush thread has stopped");
	} else if (need_stop_flush) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
	}
restore_ckpt:
	if (need_restart_ckpt) {
		if (f2fs_start_ckpt_thread(sbi))
			f2fs_warn(sbi, "background ckpt thread has stopped");
	} else if (need_stop_ckpt) {
		f2fs_stop_ckpt_thread(sbi);
	}
restore_gc:
	if (need_restart_gc) {
		if (f2fs_start_gc_thread(sbi))
			f2fs_warn(sbi, "background gc thread has stopped");
	} else if (need_stop_gc) {
		f2fs_stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
	return err;
}
#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto repeat;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}
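
/*
 * The write side goes through the address_space write_begin/write_end
 * pair one block-sized chunk at a time; -ENOMEM is retried after a
 * congestion wait rather than propagated to the quota layer.
 */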
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	void *fsdata = NULL;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, &fsdata);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			break;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, fsdata);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
		return 0;
	}

	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}

int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
				 err, i);
		}
	}
	return enabled;
}

static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}

static int f2fs_enable_quotas(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(sbi, USRQUOTA),
		test_opt(sbi, GRPQUOTA),
		test_opt(sbi, PRJQUOTA),
	};

	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
		return 0;
	}

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
					 type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
				return err;
			}
		}
	}
	return 0;
}
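
/*
 * Flush one quota file. For journalled quota, the data pages are
 * carried by the checkpoint, so only writeback is started here;
 * otherwise we also wait for it and drop the page cache so that
 * userspace sees fresh data.
 */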
static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
{
	struct quota_info *dqopt = sb_dqopt(sbi->sb);
	struct address_space *mapping = dqopt->files[type]->i_mapping;
	int ret = 0;

	ret = dquot_writeback_dquots(sbi->sb, type);
	if (ret)
		goto out;

	ret = filemap_fdatawrite(mapping);
	if (ret)
		goto out;

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		goto out;

	ret = filemap_fdatawait(mapping);

	truncate_inode_pages(&dqopt->files[type]->i_data, 0);
out:
	if (ret)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}

int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret = 0;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {

		if (type != -1 && cnt != type)
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		inode_lock(dqopt->files[cnt]);

		/*
		 * do_quotactl
		 *  f2fs_quota_sync
		 *   f2fs_down_read(quota_sem)
		 *    dquot_writeback_dquots()
		 *     f2fs_dquot_commit
		 *      block_operation
		 *       f2fs_down_read(quota_sem)
		 */
		f2fs_lock_op(sbi);
		f2fs_down_read(&sbi->quota_sem);

		ret = f2fs_quota_sync_file(sbi, cnt);

		f2fs_up_read(&sbi->quota_sem);
		f2fs_unlock_op(sbi);

		inode_unlock(dqopt->files[cnt]);

		if (ret)
			break;
	}
	return ret;
}
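
/*
 * Quota-file based enablement (quotactl Q_QUOTAON). While quota is
 * active, the quota file is marked NOATIME|IMMUTABLE so nothing else
 * writes it behind the quota code's back; __f2fs_quota_off undoes this.
 */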
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	/* if quota sysfile exists, deny enabling quota with specific file */
	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
		return -EBUSY;
	}

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}

static int __f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_quota_sync(sb, type);
	if (err)
		goto out_put;

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}
static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err;

	err = __f2fs_quota_off(sb, type);

	/*
	 * quotactl can shut down journalled quota, resulting in an
	 * inconsistency between the quota records and fs data from any
	 * subsequent updates; set the flag so fsck is aware of it.
	 */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return err;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;
	int err;

	for (type = 0; type < MAXQUOTAS; type++) {
		err = __f2fs_quota_off(sb, type);
		if (err) {
			int ret = dquot_quota_off(sb, type);

			f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
				 type, err, ret);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
		}
	}
	/*
	 * In case of checkpoint=disable, we must flush the quota blocks
	 * here. Flushing them later could dereference a NULL node_inode
	 * in end_io, since put_super has already dropped it.
	 */
	sync_filesystem(sb);
}
static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	for (type = 0; type < MAXQUOTAS; type++) {
		if (!dqopt->files[type])
			continue;
		f2fs_inode_synced(dqopt->files[type]);
	}
}

static int f2fs_dquot_commit(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
	ret = dquot_commit(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	f2fs_up_read(&sbi->quota_sem);
	return ret;
}

static int f2fs_dquot_acquire(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	f2fs_down_read(&sbi->quota_sem);
	ret = dquot_acquire(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	f2fs_up_read(&sbi->quota_sem);
	return ret;
}

static int f2fs_dquot_release(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret = dquot_release(dquot);

	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}

static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret = dquot_mark_dquot_dirty(dquot);

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);

	return ret;
}

static int f2fs_dquot_commit_info(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret = dquot_commit_info(sb, type);

	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}

static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}

static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space	= f2fs_get_reserved_space,
	.write_dquot		= f2fs_dquot_commit,
	.acquire_dquot		= f2fs_dquot_acquire,
	.release_dquot		= f2fs_dquot_release,
	.mark_dirty		= f2fs_dquot_mark_dquot_dirty,
	.write_info		= f2fs_dquot_commit_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= f2fs_get_projid,
	.get_next_id		= dquot_get_next_id,
};

static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
int f2fs_quota_sync(struct super_block *sb, int type)
{
	return 0;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif
static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.free_inode	= f2fs_free_inode,
	.drop_inode	= f2fs_drop_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * Encrypting the root directory is not allowed because fsck
	 * expects lost+found directory to exist and remain unencrypted
	 * if LOST_FOUND feature is enabled.
	 */
	if (f2fs_sb_has_lost_found(sbi) &&
			inode->i_ino == F2FS_ROOT_INO(sbi))
		return -EPERM;

	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
{
	return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
}

static bool f2fs_has_stable_inodes(struct super_block *sb)
{
	return true;
}

static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
				       int *ino_bits_ret, int *lblk_bits_ret)
{
	*ino_bits_ret = 8 * sizeof(nid_t);
	*lblk_bits_ret = 8 * sizeof(block_t);
}

static int f2fs_get_num_devices(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (f2fs_is_multi_device(sbi))
		return sbi->s_ndevs;
	return 1;
}

static void f2fs_get_devices(struct super_block *sb,
			     struct request_queue **devs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		devs[i] = bdev_get_queue(FDEV(i).bdev);
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix		= "f2fs:",
	.get_context		= f2fs_get_context,
	.set_context		= f2fs_set_context,
	.get_dummy_policy	= f2fs_get_dummy_policy,
	.empty_dir		= f2fs_empty_dir,
	.max_namelen		= F2FS_NAME_LEN,
	.has_stable_inodes	= f2fs_has_stable_inodes,
	.get_ino_and_lblk_bits	= f2fs_get_ino_and_lblk_bits,
	.get_num_devices	= f2fs_get_num_devices,
	.get_devices		= f2fs_get_devices,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (f2fs_check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
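
/*
 * Maximum file size in blocks, counting only blocks reachable through
 * node blocks (the in-inode i_addr slots are deliberately excluded, see
 * the note inside the function). As a rough worked example, assuming
 * the common 4KB geometry where DEF_ADDRS_PER_BLOCK == NIDS_PER_BLOCK
 * == 1018:
 *
 *	2 * 1018  +  2 * 1018^2  +  1018^3  =  1,057,052,516 blocks
 *
 * which is about 3.9 TiB with 4KB blocks.
 */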
loff_t max_file_blocks(struct inode *inode)
{
	loff_t result = 0;
	loff_t leaf_count;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
	 * space in inode.i_addr, so it is safer to reassign result as zero.
	 */

	if (inode && f2fs_compressed_file(inode))
		leaf_count = ADDRS_PER_BLOCK(inode);
	else
		leaf_count = DEF_ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
static int __f2fs_commit_super(struct buffer_head *bh,
			       struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's a rare case, so we can use FUA all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
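
/*
 * Cross-check that the CP/SIT/NAT/SSA/MAIN areas recorded in the raw
 * superblock tile the device back to back. Returns true when the layout
 * is inconsistent; a too-large segment_count is the one case that gets
 * fixed up (and written back when the device is writable) instead of
 * being rejected.
 */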
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			  segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			  cp_blkaddr, sit_blkaddr,
			  segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			  sit_blkaddr, nat_blkaddr,
			  segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			  nat_blkaddr, ssa_blkaddr,
			  segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			  ssa_blkaddr, main_blkaddr,
			  segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
			  main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
			  res, main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
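
/*
 * Stateless validation of a candidate superblock: magic, optional CRC,
 * block/sector geometry, and segment/section/zone accounting. A bad
 * magic returns -EINVAL; other inconsistencies return -EFSCORRUPTED so
 * the mount can fall back to the backup superblock.
 */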
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	size_t crc_offset = 0;
	__u32 crc = 0;

	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
		f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
			  F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return -EINVAL;
	}

	/* Check checksum_offset and crc in superblock */
	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
		crc_offset = le32_to_cpu(raw_super->checksum_offset);
		if (crc_offset !=
			offsetof(struct f2fs_super_block, crc)) {
			f2fs_info(sbi, "Invalid SB checksum offset: %zu",
				  crc_offset);
			return -EFSCORRUPTED;
		}
		crc = le32_to_cpu(raw_super->crc);
		if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
			f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
			return -EFSCORRUPTED;
		}
	}

	/* Currently, support only 4KB block size */
	if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
		f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
			  le32_to_cpu(raw_super->log_blocksize),
			  F2FS_BLKSIZE_BITS);
		return -EFSCORRUPTED;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_info(sbi, "Invalid log blocks per segment (%u)",
			  le32_to_cpu(raw_super->log_blocks_per_seg));
		return -EFSCORRUPTED;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectorsize (%u)",
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
			  le32_to_cpu(raw_super->log_sectors_per_block),
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment_count > F2FS_MAX_SEGMENT ||
			segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
		return -EFSCORRUPTED;
	}

	if (total_sections > segment_count_main || total_sections < 1 ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
			  segment_count, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if (segment_count_main != total_sections * segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
			  segment_count_main, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
			  segment_count, segs_per_sec, total_sections);
		return -EFSCORRUPTED;
	}

	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
			  segment_count, le64_to_cpu(raw_super->block_count));
		return -EFSCORRUPTED;
	}

	if (RDEV(0).path[0]) {
		block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
		int i = 1;

		while (i < MAX_DEVICES && RDEV(i).path[0]) {
			dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
			i++;
		}
		if (segment_count != dev_seg_count) {
			f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
				  segment_count, dev_seg_count);
			return -EFSCORRUPTED;
		}
	} else {
		if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
					!bdev_is_zoned(sbi->sb->s_bdev)) {
			f2fs_info(sbi, "Zoned block device path is missing");
			return -EFSCORRUPTED;
		}
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
			  secs_per_zone, total_sections);
		return -EFSCORRUPTED;
	}
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
			  le32_to_cpu(raw_super->extension_count),
			  raw_super->hot_ext_count,
			  F2FS_MAX_EXTENSION);
		return -EFSCORRUPTED;
	}

	if (le32_to_cpu(raw_super->cp_payload) >=
				(blocks_per_seg - F2FS_CP_PACKS -
				NR_CURSEG_PERSIST_TYPE)) {
		f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
			  le32_to_cpu(raw_super->cp_payload),
			  blocks_per_seg - F2FS_CP_PACKS -
			  NR_CURSEG_PERSIST_TYPE);
		return -EFSCORRUPTED;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			  le32_to_cpu(raw_super->node_ino),
			  le32_to_cpu(raw_super->meta_ino),
			  le32_to_cpu(raw_super->root_ino));
		return -EFSCORRUPTED;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return -EFSCORRUPTED;

	return 0;
}
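
/*
 * Cross-check the current checkpoint against the super block: meta-segment
 * accounting, user/node block counts, current segment numbers and block
 * offsets (including duplicates between node and data logs), SIT/NAT bitmap
 * sizes, cp_pack_start_sum and the nat_bits footprint. Returns 1 if the
 * checkpoint looks corrupted, 0 otherwise.
 */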
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count, valid_user_blocks;
	block_t avail_node_count, valid_node_count;
	unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
	int i, j;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (!f2fs_sb_has_readonly(sbi) &&
		unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
		return 1;
	}
	user_block_count = le64_to_cpu(ckpt->user_block_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
			(f2fs_sb_has_readonly(sbi) ? 1 : 0);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		f2fs_err(sbi, "Wrong user_block_count: %u",
			 user_block_count);
		return 1;
	}

	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
	if (valid_user_blocks > user_block_count) {
		f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
			 valid_user_blocks, user_block_count);
		return 1;
	}

	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	if (valid_node_count > avail_node_count) {
		f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
			 valid_node_count, avail_node_count);
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;

		if (f2fs_sb_has_readonly(sbi))
			goto check_data;

		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
				f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
check_data:
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;

		if (f2fs_sb_has_readonly(sbi))
			goto skip_cross;

		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_data_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
skip_cross:
	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
			 sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_PERSIST_TYPE) {
		f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
			 cp_pack_start_sum);
		return 1;
	}

	if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
		le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
		f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
			  "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
			  "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
			  le32_to_cpu(ckpt->checksum_offset));
		return 1;
	}

	nat_blocks = nat_segs << log_blocks_per_seg;
	nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
	nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
		(cp_payload + F2FS_CP_PACKS +
		NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
		f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u",
			  cp_payload, nat_bits_blocks);
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_err(sbi, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
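
/*
 * Seed the in-memory sb info from the validated super block: geometry,
 * reserved inode numbers, GC/victim defaults, timer intervals, per-type
 * page counters and the locks used throughout the mount.
 */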
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
	F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
	F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
	sbi->migration_granularity = sbi->segs_per_sec;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
				DEF_UMOUNT_DISCARD_TIMEOUT;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	init_f2fs_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_f2fs_rwsem(&sbi->sb_lock);
	init_f2fs_rwsem(&sbi->pin_sem);
}
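
/*
 * Set up the per-CPU counters; if the second counter fails, the first one
 * is torn down so the caller sees all-or-nothing initialization.
 */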
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
	if (err)
		percpu_counter_destroy(&sbi->alloc_valid_block_count);

	return err;
}

#ifdef CONFIG_BLK_DEV_ZONED

struct f2fs_report_zones_args {
	struct f2fs_dev_info *dev;
	bool zone_cap_mismatch;
};

static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
			void *data)
{
	struct f2fs_report_zones_args *rz_args = data;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return 0;

	set_bit(idx, rz_args->dev->blkz_seq);
	rz_args->dev->zone_capacity_blocks[idx] = zone->capacity >>
						F2FS_LOG_SECTORS_PER_BLOCK;
	if (zone->len != zone->capacity && !rz_args->zone_cap_mismatch)
		rz_args->zone_cap_mismatch = true;

	return 0;
}
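
/*
 * Discover the zone layout of device @devi: zone size, number of zones,
 * the sequential-zone bitmap and per-zone capacities via
 * blkdev_report_zones(). The capacity array is dropped again when every
 * zone reported capacity == zone size.
 */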
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	struct f2fs_report_zones_args rep_zone_arg;
	int ret;

	if (!f2fs_sb_has_blkzoned(sbi))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
					BITS_TO_LONGS(FDEV(devi).nr_blkz)
					* sizeof(unsigned long),
					GFP_KERNEL);
	if (!FDEV(devi).blkz_seq)
		return -ENOMEM;

	/* Get block zones type and zone-capacity */
	FDEV(devi).zone_capacity_blocks = f2fs_kzalloc(sbi,
					FDEV(devi).nr_blkz * sizeof(block_t),
					GFP_KERNEL);
	if (!FDEV(devi).zone_capacity_blocks)
		return -ENOMEM;

	rep_zone_arg.dev = &FDEV(devi);
	rep_zone_arg.zone_cap_mismatch = false;

	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
				  &rep_zone_arg);
	if (ret < 0)
		return ret;

	if (!rep_zone_arg.zone_cap_mismatch) {
		kfree(FDEV(devi).zone_capacity_blocks);
		FDEV(devi).zone_capacity_blocks = NULL;
	}

	return 0;
}
#endif

/*
 * Read the raw f2fs super block.
 * Because we keep two copies of the super block, read both of them
 * to get the first valid one. If either copy is broken, pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_err(sbi, "Unable to read %dth superblock",
				 block + 1);
			err = -EIO;
			*recovery = 1;
			continue;
		}

		/* sanity checking of raw super */
		err = sanity_check_raw_super(sbi, bh);
		if (err) {
			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
				 block + 1);
			brelse(bh);
			*recovery = 1;
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
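
/*
 * Write the in-memory super block back to disk, refreshing the SB checksum
 * first when the feature is enabled. The backup copy is always written
 * first; on the recovery path (@recover == true) the currently valid copy
 * is left untouched.
 */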
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	__u32 crc = 0;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* we should update superblock crc here */
	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
				offsetof(struct f2fs_super_block, crc));
		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
	}

	/* write back-up superblock first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
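
/*
 * Open every device backing the filesystem. A plain single-device mount
 * returns early; otherwise sbi->devs is populated with the block range and
 * bdev of each entry in the super block's device table (or of the single
 * zoned device), and zone info is initialized where applicable.
 */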
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
				 array_size(max_devices,
					    sizeof(struct f2fs_dev_info)),
				 GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
				    (FDEV(i).total_segments <<
				    sbi->log_blocks_per_seg) - 1 +
				    le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_has_blkzoned(sbi)) {
			f2fs_err(sbi, "Zoned block device feature not enabled");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				  i, FDEV(i).path,
				  FDEV(i).total_segments,
				  FDEV(i).start_blk, FDEV(i).end_blk,
				  bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				  "Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
			  i, FDEV(i).path,
			  FDEV(i).total_segments,
			  FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_info(sbi,
		  "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}
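
/*
 * Load the unicode casefolding table named by the super block when the
 * casefold feature is set; without CONFIG_UNICODE such a filesystem
 * refuses to mount.
 */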
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_UNICODE
	if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
		const struct f2fs_sb_encodings *encoding_info;
		struct unicode_map *encoding;
		__u16 encoding_flags;

		if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
					  &encoding_flags)) {
			f2fs_err(sbi,
				 "Encoding requested by superblock is unknown");
			return -EINVAL;
		}

		encoding = utf8_load(encoding_info->version);
		if (IS_ERR(encoding)) {
			f2fs_err(sbi,
				 "can't mount with superblock charset: %s-%s "
				 "not supported by the kernel. flags: 0x%x.",
				 encoding_info->name, encoding_info->version,
				 encoding_flags);
			return PTR_ERR(encoding);
		}
		f2fs_info(sbi, "Using encoding defined by superblock: "
			  "%s-%s with flags 0x%hx", encoding_info->name,
			  encoding_info->version ?: "\b", encoding_flags);

		sbi->sb->s_encoding = encoding;
		sbi->sb->s_encoding_flags = encoding_flags;
	}
#else
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	return 0;
}

static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_i = SM_I(sbi);

	/* adjust parameters according to the volume size */
	if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
		sm_i->dcc_info->discard_granularity = 1;
		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
	}

	sbi->readdir_ra = 1;
}
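
/*
 * The mount workhorse: read and validate the super block copies, parse
 * options, set up the VFS super_block, checkpoint, device list, segment and
 * node managers and the root inode, then run fsync-data recovery. The error
 * path unwinds in strict reverse order of initialization, and the whole
 * sequence is retried once when recovery had to be skipped.
 */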
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool skip_recovery = false, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;
	int retry_cnt = 1;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_err(sbi, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_err(sbi, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sbi))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						 sizeof(raw_super->uuid));

	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options, false);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_blocks(NULL) <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;

	err = f2fs_setup_casefold(sbi);
	if (err)
		goto free_options;

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	if (f2fs_sb_has_quota_ino(sbi)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif

	sb->s_op = &f2fs_sops;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &f2fs_verityops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
	sb->s_iflags |= SB_I_CGROUPWB;

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	init_f2fs_rwsem(&sbi->gc_lock);
	mutex_init(&sbi->writepages);
	init_f2fs_rwsem(&sbi->cp_global_sem);
	init_f2fs_rwsem(&sbi->node_write);
	init_f2fs_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	sbi->iostat_enable = false;
	sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] =
			f2fs_kmalloc(sbi,
				     array_size(n,
						sizeof(struct f2fs_bio_info)),
				     GFP_KERNEL);
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
			goto free_bio_info;
		}

		for (j = HOT; j < n; j++) {
			init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
			INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
			init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
		}
	}

	init_f2fs_rwsem(&sbi->cp_rwsem);
	init_f2fs_rwsem(&sbi->quota_sem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_bio_info;

	if (F2FS_IO_ALIGNED(sbi)) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_percpu;
		}
	}

	/* init per sbi slab cache */
	err = f2fs_init_xattr_caches(sbi);
	if (err)
		goto free_io_dummy;

	err = f2fs_init_page_array_cache(sbi);
	if (err)
		goto free_xattr_cache;

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_err(sbi, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_page_array_cache;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_NEED_FSCK);

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to find devices");
		goto free_devices;
	}

	err = f2fs_init_post_read_wq(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize post read workqueue");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}
	mutex_init(&sbi->flush_lock);

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);

	f2fs_init_fsync_node_info(sbi);

	/* setup checkpoint request control and start checkpoint issue thread */
	f2fs_init_ckpt_req_control(sbi);
	if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
			test_opt(sbi, MERGE_CHECKPOINT)) {
		err = f2fs_start_ckpt_thread(sbi);
		if (err) {
			f2fs_err(sbi,
				 "Failed to start F2FS issue_checkpoint_thread (%d)",
				 err);
			goto stop_ckpt_thread;
		}
	}

	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
			 err);
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
			 err);
		goto free_nm;
	}

	err = adjust_reserved_segment(sbi);
	if (err)
		goto free_nm;

	/* For write statistics */
	sbi->sectors_written_start = f2fs_get_sectors_written(sbi);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_err(sbi, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_stats;
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_err(sbi, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_node_inode;
	}

	err = f2fs_init_compress_inode(sbi);
	if (err)
		goto free_root_inode;

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_compress_inode;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount */
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err)
			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
	}
#endif
	/* if there are any orphan inodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
		goto reset_checkpoint;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
			!test_opt(sbi, NORECOVERY)) {
		/*
		 * The mount should fail when the device is read-only and
		 * the previous checkpoint was not done by a clean system
		 * shutdown.
		 */
		if (f2fs_hw_is_readonly(sbi)) {
			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
				err = f2fs_recover_fsync_data(sbi, true);
				if (err > 0) {
					err = -EROFS;
					f2fs_err(sbi, "Need to recover fsync data, but "
						 "write access unavailable, please try "
						 "mount w/ disable_roll_forward or norecovery");
				}
				if (err < 0)
					goto free_meta;
			}
			f2fs_info(sbi, "write access unavailable, skipping recovery");
			goto reset_checkpoint;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (skip_recovery)
			goto reset_checkpoint;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			if (err != -ENOMEM)
				skip_recovery = true;
			need_fsck = true;
			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
				 err);
			goto free_meta;
		}
	} else {
		err = f2fs_recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_err(sbi, "Need to recover fsync data");
			goto free_meta;
		}
	}

	/*
	 * If the f2fs is not readonly and fsync data recovery succeeds,
	 * check zoned block devices' write pointer consistency.
	 */
	if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_check_write_pointer(sbi);
		if (err)
			goto free_meta;
	}

reset_checkpoint:
	f2fs_init_inmem_curseg(sbi);

	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = f2fs_disable_checkpoint(sbi);
		if (err)
			goto sync_free_meta;
	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
		f2fs_enable_checkpoint(sbi);
	}

	/*
	 * If the filesystem is not mounted read-only, start the GC thread.
	 */
	if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
		test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto sync_free_meta;
	}
	kvfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
			  sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
		    cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
	return 0;

sync_free_meta:
	/* safe to flush all the data */
	sync_filesystem(sbi->sb);
	retry_cnt = 0;

free_meta:
#ifdef CONFIG_QUOTA
	f2fs_truncate_quota_inode_pages(sb);
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	/*
	 * Some dirty meta pages can be left behind when
	 * f2fs_recover_orphan_inodes() fails with EIO. Then iput(node_inode)
	 * can trigger balance_fs_bg() followed by f2fs_write_checkpoint()
	 * through f2fs_write_node_pages(), which falls into an infinite loop
	 * in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	/* evict some inodes being cached by GC */
	evict_inodes(sb);
	f2fs_unregister_sysfs(sbi);
free_compress_inode:
	f2fs_destroy_compress_inode(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
	sbi->node_inode = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_nm:
	/* stop discard thread before destroying node manager */
	f2fs_stop_discard_thread(sbi);
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
	f2fs_destroy_post_read_wq(sbi);
stop_ckpt_thread:
	f2fs_stop_ckpt_thread(sbi);
free_devices:
	destroy_device_list(sbi);
	kvfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;
free_page_array_cache:
	f2fs_destroy_page_array_cache(sbi);
free_xattr_cache:
	f2fs_destroy_xattr_caches(sbi);
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_percpu:
	destroy_percpu_info(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);

#ifdef CONFIG_UNICODE
	utf8_unload(sb->s_encoding);
	sb->s_encoding = NULL;
#endif
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	kvfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one more chance */
	if (retry_cnt > 0 && skip_recovery) {
		retry_cnt--;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
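
/*
 * Tear down at umount: stop the GC and discard threads, drop the compress
 * inode cache, and write a CP_UMOUNT checkpoint when the filesystem is
 * dirty or the last checkpoint was not an umount one, before handing off
 * to kill_block_super().
 */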
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		struct f2fs_sb_info *sbi = F2FS_SB(sb);

		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		/*
		 * A later evict_inode() can bypass checking and invalidating
		 * the compress inode cache, so truncate it here.
		 */
		if (test_opt(sbi, COMPRESS_CACHE))
			truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
#endif

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			f2fs_write_checkpoint(sbi, &cpc);
		}

		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
}

static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");

static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
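
/*
 * Module init: create every global slab cache, register sysfs, the
 * shrinker and the filesystem type, then bring up the post-read, bio,
 * compression and casefold infrastructure. Each failure label unwinds
 * exactly the steps that already succeeded.
 */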
static int __init init_f2fs_fs(void)
{
	int err;

	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
		       PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_recovery_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_recovery_cache;
	err = f2fs_create_garbage_collection_cache();
	if (err)
		goto free_extent_cache;
	err = f2fs_init_sysfs();
	if (err)
		goto free_garbage_collection_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	err = f2fs_init_bio_entry_cache();
	if (err)
		goto free_post_read;
	err = f2fs_init_bioset();
	if (err)
		goto free_bio_entry_cache;
	err = f2fs_init_compress_mempool();
	if (err)
		goto free_bioset;
	err = f2fs_init_compress_cache();
	if (err)
		goto free_compress_mempool;
	err = f2fs_create_casefold_cache();
	if (err)
		goto free_compress_cache;
	return 0;
free_compress_cache:
	f2fs_destroy_compress_cache();
free_compress_mempool:
	f2fs_destroy_compress_mempool();
free_bioset:
	f2fs_destroy_bioset();
free_bio_entry_cache:
	f2fs_destroy_bio_entry_cache();
free_post_read:
	f2fs_destroy_post_read_processing();
free_root_stats:
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_garbage_collection_cache:
	f2fs_destroy_garbage_collection_cache();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_recovery_cache:
	f2fs_destroy_recovery_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}

static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_casefold_cache();
	f2fs_destroy_compress_cache();
	f2fs_destroy_compress_mempool();
	f2fs_destroy_bioset();
	f2fs_destroy_bio_entry_cache();
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	f2fs_destroy_garbage_collection_cache();
	f2fs_destroy_extent_cache();
	f2fs_destroy_recovery_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);
MODULE_SOFTDEP("pre: crc32");