volumes.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2,
		.dev_stripes = 1,
		.devs_max = 0, /* 0 == as many as possible */
		.devs_min = 4,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid10",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 2,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 2,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "raid1",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C3] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 3,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 3,
		.ncopies = 3,
		.nparity = 0,
		.raid_name = "raid1c3",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1C4] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 4,
		.devs_min = 4,
		.tolerated_failures = 3,
		.devs_increment = 4,
		.ncopies = 4,
		.nparity = 0,
		.raid_name = "raid1c4",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
		.mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1,
		.dev_stripes = 2,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 2,
		.nparity = 0,
		.raid_name = "dup",
		.bg_flag = BTRFS_BLOCK_GROUP_DUP,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "raid0",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 1,
		.devs_min = 1,
		.tolerated_failures = 0,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 0,
		.raid_name = "single",
		.bg_flag = 0,
		.mindev_error = 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 2,
		.tolerated_failures = 1,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 1,
		.raid_name = "raid5",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1,
		.dev_stripes = 1,
		.devs_max = 0,
		.devs_min = 3,
		.tolerated_failures = 2,
		.devs_increment = 1,
		.ncopies = 1,
		.nparity = 2,
		.raid_name = "raid6",
		.bg_flag = BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
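
/*
 * Illustrative sketch, not part of the original file: the table above is
 * indexed by enum btrfs_raid_types, so per-profile properties can be read
 * straight out of it. The helper name below is hypothetical.
 */
static inline int btrfs_raid_type_tolerated_failures(enum btrfs_raid_types type)
{
	/* e.g. raid1c3 keeps three copies (ncopies == 3) and survives two lost devices */
	return btrfs_raid_array[type].tolerated_failures;
}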

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with a textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide a
	 * sufficiently large buffer.
	 */
out_overflow:;
}
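
/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	char buf[64];
 *
 *	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_DATA |
 *				    BTRFS_BLOCK_GROUP_RAID1, buf, sizeof(buf));
 *
 * buf now holds "data|raid1"; any bit not known to the table above would be
 * appended in hex by the final snprintf.
 */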

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */
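
/*
 * Nesting sketch (illustrative, not part of the original file): when more
 * than one of the locks above is needed, they are taken outermost-first in
 * the order shown under "Lock nesting", e.g.:
 *
 *	mutex_lock(&uuid_mutex);
 *	mutex_lock(&fs_devices->device_list_mutex);
 *	... walk or modify fs_devices->devices ...
 *	mutex_unlock(&fs_devices->device_list_mutex);
 *	mutex_unlock(&uuid_mutex);
 */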

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid: if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	INIT_LIST_HEAD(&fs_devs->seed_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(fs_info, &dev->alloc_state,
			    IO_TREE_DEVICE_ALLOC_STATE, NULL);

	return dev;
}

static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by first scanning
	 * a device which didn't have its fsid/metadata_uuid changed
	 * at all and the CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}
	/*
	 * Handle scanned device having completed its fsid change but
	 * belonging to a fs_devices that was created by a device that
	 * has an outdated pair of fsid/metadata_uuid and
	 * CHANGING_FSID_V2 flag set.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (fs_devices->fsid_change &&
		    memcmp(fs_devices->metadata_uuid,
			   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
		    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct btrfs_super_block **disk_super)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*disk_super = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*disk_super)) {
		ret = PTR_ERR(*disk_super);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	return ret;
}
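
/*
 * Usage sketch (illustrative, not part of the original file; 'path' and
 * 'holder' are assumed locals): on success the caller owns both the opened
 * block device and the super block copy and must release them itself:
 *
 *	struct block_device *bdev;
 *	struct btrfs_super_block *disk_super;
 *	int ret;
 *
 *	ret = btrfs_get_bdev_and_sb(path, FMODE_READ, holder, 0,
 *				    &bdev, &disk_super);
 *	if (!ret) {
 *		... inspect disk_super ...
 *		btrfs_release_disk_super(disk_super);
 *		blkdev_put(bdev, FMODE_READ);
 *	}
 */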

static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	int found;

	rcu_read_lock();
	found = strcmp(rcu_str_deref(device->name), path);
	rcu_read_unlock();

	return found == 0;
}

/*
 * Search and remove all stale devices (devices which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 *
 * @path:	Optional. When provided, release only the unmounted devices
 *		matching this path.
 * @skip_device: Optional. Skip this device when searching for stale
 *		devices.
 *
 * Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}
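
/*
 * Usage sketch (illustrative, not part of the original file): per the
 * lockdep assertion above, callers take uuid_mutex first:
 *
 *	mutex_lock(&uuid_mutex);
 *	ret = btrfs_free_stale_devices(path, NULL);
 *	mutex_unlock(&uuid_mutex);
 */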

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &disk_super);
	if (ret)
		return ret;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_free_page;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_free_page;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_free_page;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = true;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	btrfs_release_disk_super(disk_super);

	return 0;

error_free_page:
	btrfs_release_disk_super(disk_super);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such a
 * disk can belong to an fs which has had its FSID changed or to one which
 * hasn't. Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where the scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently the device didn't
	 * observe it. Meaning our fsid will be different from theirs. We need
	 * to handle two subcases:
	 * 1 - The fs still continues to have different METADATA/FSID uuids.
	 * 2 - The fs is switched back to its original FSID (METADATA/FSID
	 * are equal).
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Changed UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;

		/* Unchanged UUIDs */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
				struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handle the case where the scanned device is part of an fs whose last
	 * metadata UUID change reverted it to the original FSID. At the same
	 * time fs_devices was first created by another constituent device
	 * which didn't fully observe the operation. This results in a
	 * btrfs_fs_devices created with metadata/fsid different AND
	 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
	 * fs_devices equal to the FSID of the disk.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    fs_devices->fsid_change)
			return fs_devices;
	}

	return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	if (fsid_change_in_progress) {
		if (!has_metadata_uuid)
			fs_devices = find_fsid_inprogress(disk_super);
		else
			fs_devices = find_fsid_changed(disk_super);
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid_with_metadata_uuid(disk_super);
	} else {
		fs_devices = find_fsid_reverted_metadata(disk_super);
		if (!fs_devices)
			fs_devices = find_fsid(disk_super->fsid, NULL);
	}

	if (!fs_devices) {
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
				disk_super->dev_item.uuid, NULL, false);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);

			if (has_metadata_uuid)
				memcpy(fs_devices->metadata_uuid,
				       disk_super->metadata_uuid,
				       BTRFS_FSID_SIZE);
			else
				memcpy(fs_devices->metadata_uuid,
				       disk_super->fsid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info(
	"BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->label, devid, found_transid, path,
				current->comm, task_pid_nr(current));
		else
			pr_info(
	"BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
				disk_super->fsid, devid, found_transid, path,
				current->comm, task_pid_nr(current));

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at any time.
		 */
		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted. We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with the same uuid and devid. We keep the one
			 * with the larger generation number or the last-in if
			 * generations are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			struct block_device *path_bdev;

			path_bdev = lookup_bdev(path);
			if (IS_ERR(path_bdev)) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_CAST(path_bdev);
			}

			if (device->bdev != path_bdev) {
				bdput(path_bdev);
				mutex_unlock(&fs_devices->device_list_mutex);
				/*
				 * device->fs_info may not be reliable here, so
				 * pass in a NULL instead. This avoids a
				 * possible use-after-free when the fs_info and
				 * fs_info->sb are already torn down.
				 */
				btrfs_warn_in_rcu(NULL,
	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
						  path, devid, found_transid,
						  current->comm,
						  task_pid_nr(current));
				return ERR_PTR(-EEXIST);
			}
			bdput(path_bdev);
			btrfs_info_in_rcu(device->fs_info,
	"devid %llu device path %s changed to %s scanned by %s (%d)",
					  devid, rcu_str_deref(device->name),
					  path, current->comm,
					  task_pid_nr(current));
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero the
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with the largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}
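
/*
 * Worked example (editorial, not from the original source): suppose two
 * block devices carry the same fsid/devid because one is a stale copy.
 * If the filesystem is not mounted and the stale copy is scanned with
 * transid 100 after the live copy registered transid 120, the
 * found_transid < device->generation check above rejects the stale scan
 * with -EEXIST, keeping the copy with the larger generation.
 */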

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;
	int ret = 0;

	lockdep_assert_held(&uuid_mutex);

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto error;
		}

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				ret = -ENOMEM;
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(ret);
}
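
/*
 * Editorial note: the user of clone_fs_devices() in this file is
 * btrfs_prepare_sprout() further below, which keeps a private copy of the
 * seed device list registered in fs_uuids so that an already-seeded
 * filesystem can still be resolved by later scans.
 */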

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
				      int step, struct btrfs_device **latest_dev)
{
	struct btrfs_device *device, *next;

	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    !test_bit(BTRFS_DEV_STATE_MISSING,
				      &device->dev_state) &&
			    (!*latest_dev ||
			     device->generation > (*latest_dev)->generation)) {
				*latest_dev = device;
			}
			continue;
		}

		/*
		 * We have already validated the presence of
		 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace(), so just
		 * continue.
		 */
		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the devices that do not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_fs_devices *seed_dev;

	mutex_lock(&uuid_mutex);
	__btrfs_free_extra_devids(fs_devices, step, &latest_dev);

	list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
		__btrfs_free_extra_devids(seed_dev, step, &latest_dev);

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (device->devid == BTRFS_DEV_REPLACE_DEVID)
		clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		fs_devices->missing_devices--;
	}

	btrfs_close_bdev(device);
	if (device->bdev) {
		fs_devices->open_devices--;
		device->bdev = NULL;
	}
	clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);

	device->fs_info = NULL;
	atomic_set(&device->dev_stats_ccnt, 0);
	extent_io_tree_release(&device->alloc_state);

	/*
	 * Reset the flush error record. We might have a transient flush error
	 * in this mount, and if so we aborted the current transaction and set
	 * the fs to an error state, guaranteeing no super blocks can be further
	 * committed. However that error might be transient and if we unmount the
	 * filesystem and mount it again, we should allow the mount to succeed
	 * (btrfs_check_rw_degradable() should not fail) - if after mounting the
	 * filesystem again we still get flush errors, then we will again abort
	 * any transaction and set the error state, guaranteeing no commits of
	 * unsafe super blocks.
	 */
	device->last_flush_error = 0;

	/* Verify the device is back in a pristine state */
	ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
	ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	ASSERT(list_empty(&device->dev_alloc_list));
	ASSERT(list_empty(&device->post_commit_list));
	ASSERT(atomic_read(&device->reada_in_flight) == 0);
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	lockdep_assert_held(&uuid_mutex);

	if (--fs_devices->opened > 0)
		return;

	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
		btrfs_close_one_device(device);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = false;
	fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	LIST_HEAD(list);
	struct btrfs_fs_devices *tmp;

	mutex_lock(&uuid_mutex);
	close_fs_devices(fs_devices);
	if (!fs_devices->opened)
		list_splice_init(&fs_devices->seed_list, &list);

	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
		close_fs_devices(fs_devices);
		list_del(&fs_devices->seed_list);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	struct btrfs_device *tmp_device;

	flags |= FMODE_EXCL;

	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
				 dev_list) {
		int ret;

		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
		if (ret == 0 &&
		    (!latest_dev || device->generation > latest_dev->generation)) {
			latest_dev = device;
		} else if (ret == -ENODATA) {
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);
		}
	}
	if (fs_devices->open_devices == 0)
		return -EINVAL;

	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
	fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;

	return 0;
}

static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}
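
/*
 * Editorial note: devid_cmp() is the comparator handed to list_sort() in
 * btrfs_open_devices() below, so the device list is opened in ascending
 * devid order (e.g. devid 1, 2, 3, ...) regardless of the order in which
 * the devices were scanned.
 */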

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like bd_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex.
	 */
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
	struct page *page = virt_to_page(super);

	put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
						       u64 bytenr)
{
	struct btrfs_super_block *disk_super;
	struct page *page;
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return ERR_PTR(-EINVAL);

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
		return ERR_PTR(-EINVAL);

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
	if (IS_ERR(page))
		return ERR_CAST(page);

	p = page_address(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(disk_super) != bytenr ||
	    btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(p);
		return ERR_PTR(-EINVAL);
	}

	if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
		disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

	return disk_super;
}
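
/*
 * Worked example (editorial): with 4 KiB pages, the primary super at
 * bytenr 65536 maps to page index 16 (65536 >> 12), and since the super
 * block structure is at most one page, (65536 + 4096 - 1) >> 12 is still
 * 16, so the straddle check above passes and one page read suffices.
 */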

int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	u64 bytenr;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	disk_super = btrfs_read_disk_super(bdev, bytenr);
	if (IS_ERR(disk_super)) {
		device = ERR_CAST(disk_super);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(disk_super);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}
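
/*
 * Usage sketch (editorial, assuming a caller shaped like the device scan
 * ioctl path; the uuid_mutex is mandatory, as asserted by lockdep above):
 *
 *	mutex_lock(&uuid_mutex);
 *	device = btrfs_scan_one_device(path, FMODE_READ, holder);
 *	mutex_unlock(&uuid_mutex);
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 */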

/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
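
/*
 * Worked example (editorial): if an allocated chunk covers roughly
 * [1M, 2M) and the caller asks about *start = 1.5M with len = 1M, the
 * second in_range() test fires because *start falls inside the chunk;
 * *start is bumped to physical_end + 1 (just past the chunk) and true is
 * returned so the caller restarts its search there.
 */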

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/*
		 * We don't want to overwrite the superblock on the drive nor
		 * any area used by the boot loader (grub for example), so we
		 * make sure to start at an offset of at least 1MB.
		 */
		return max_t(u64, start, SZ_1M);
	default:
		BUG();
	}
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:	the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:	the size of the hole
 * @num_bytes:	the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position was updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
				  u64 *hole_size, u64 num_bytes)
{
	bool changed = false;
	u64 hole_end = *hole_start + *hole_size;

	/*
	 * Check before we set max_hole_start, otherwise we could end up
	 * sending back this offset anyway.
	 */
	if (contains_pending_extent(device, hole_start, *hole_size)) {
		if (hole_end >= *hole_start)
			*hole_size = hole_end - *hole_start;
		else
			*hole_size = 0;
		changed = true;
	}

	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		/* No extra check */
		break;
	default:
		BUG();
	}

	return changed;
}

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search the *commit* root of the device tree, and
 * does an extra check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as a device extent freed in the current
 * transaction is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
				      u64 num_bytes, u64 search_start,
				      u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	search_start = dev_extent_search_start(device, search_start);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
					      num_bytes);

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;
		if (dev_extent_hole_check(device, &search_start, &hole_size,
					  num_bytes)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
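
/*
 * Worked example (editorial): on a device whose dev extents occupy
 * [1M, 5M) and [9M, 13M), a call asking for num_bytes = 2M walks the
 * commit root, sees the hole [5M, 9M) of size 4M >= 2M, and returns with
 * *start = 5M. If no hole were large enough, -ENOSPC would be returned
 * with *start/*len describing the largest hole found instead.
 */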

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (ret == 0) {
		/* Corruption */
		btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
		ret = -EUCLEAN;
		goto error;
	}

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
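
/*
 * Worked example (editorial): with device items at devids 1 and 2, the
 * search for (DEV_ITEMS, DEV_ITEM, -1) lands past the last item and
 * btrfs_previous_item() steps back to the devid 2 item, so *devid_ret
 * becomes 3. If no previous device item exists, the lookup fails and
 * *devid_ret defaults to 1.
 */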

/*
 * The device information is stored in the chunk root.
 * The btrfs_device struct should be fully filled in.
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			      struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 *
 * We don't care about errors here, this is just to be kind to userspace.
 */
static void update_dev_time(const char *device_path)
{
	struct path path;
	struct timespec64 now;
	int ret;

	ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
	if (ret)
		return;

	now = current_time(d_inode(path.dentry));
	inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
	path_put(&path);
}

static int btrfs_rm_dev_item(struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding e.g.
 * device replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_array[i].mindev_error;

			if (ret)
				return ret;
		}
	}

	return 0;
}
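
/*
 * Worked example (editorial): on a two-device filesystem using RAID1,
 * whose devs_min in btrfs_raid_array is 2, btrfs_rm_device() below calls
 * this with num_devices - 1 = 1, which is below devs_min, so the
 * profile's mindev_error is returned and the removal is refused.
 */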

static struct btrfs_device * btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device. In the context
 * where this function is called there should always be another device (or
 * this_dev) which is active.
 */
void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
					    struct btrfs_device *next_device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;

	if (!next_device)
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
			(fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
	u64 num_devices = fs_info->fs_devices->num_devices;

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		ASSERT(num_devices > 1);
		num_devices--;
	}
	up_read(&fs_info->dev_replace.rwsem);

	return num_devices;
}

void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
			       struct block_device *bdev,
			       const char *device_path)
{
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
		struct page *page;
		int ret;

		disk_super = btrfs_read_dev_one_super(bdev, copy_num);
		if (IS_ERR(disk_super))
			continue;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));

		page = virt_to_page(disk_super);
		set_page_dirty(page);
		lock_page(page);
		/* write_one_page() unlocks the page */
		ret = write_one_page(page);
		if (ret)
			btrfs_warn(fs_info,
				"error clearing superblock number %d (%d)",
				copy_num, ret);
		btrfs_release_disk_super(disk_super);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}
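
/*
 * Editorial note: only the magic field of each super block copy is
 * zeroed above. That is enough for the BTRFS_MAGIC check in
 * btrfs_read_disk_super() earlier in this file to fail, so a later scan
 * of this device no longer recognizes it as btrfs.
 */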

int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
		u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	/*
	 * The device list in fs_devices is accessed without locks (neither
	 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
	 * filesystem and another device rm cannot run.
	 */
	num_devices = btrfs_num_devices(fs_info);

	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		goto out;

	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);

	if (IS_ERR(device)) {
		if (PTR_ERR(device) == -ENOENT &&
		    device_path && strcmp(device_path, "missing") == 0)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = PTR_ERR(device);
		goto out;
	}

	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
	  "cannot remove device %s (devid %llu) due to active swapfile",
				  rcu_str_deref(device->name), device->devid);
		ret = -ETXTBSY;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	ret = btrfs_shrink_device(device, 0);
	if (!ret)
		btrfs_reada_remove_dev(device);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(device);
	if (ret)
		goto error_undo;

	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	btrfs_scrub_cancel_dev(device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	/*
	 * In normal cases the cur_devices == fs_devices. But in case
	 * of deleting a seed device, the cur_devices should point to
	 * its own fs_devices listed under the fs_devices->seed_list.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	cur_devices->num_devices--;
	cur_devices->total_devices--;
	/* Update total_devices of the parent fs_devices if it's seed */
	if (cur_devices != fs_devices)
		fs_devices->total_devices--;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		cur_devices->missing_devices--;

	btrfs_assign_next_active_device(device, NULL);

	if (device->bdev) {
		cur_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_remove_device(device);
	}

	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * at this point, the device is zero sized and detached from
	 * the devices list. All that's left is to zero out the old
	 * supers and free the device.
	 */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		btrfs_scratch_superblocks(fs_info, device->bdev,
					  device->name->str);

	btrfs_close_bdev(device);
	synchronize_rcu();
	btrfs_free_device(device);

	if (cur_devices->open_devices == 0) {
		list_del_init(&cur_devices->seed_list);
		close_fs_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

out:
	return ret;

error_undo:
	btrfs_reada_undo_remove_dev(device);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_add(&device->dev_alloc_list,
			 &fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		mutex_unlock(&fs_info->chunk_mutex);
	}
	goto out;
}

void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);

	/*
	 * In case of an fs with no seed, srcdev->fs_devices will point
	 * to the fs_devices of fs_info. However when the dev being replaced
	 * is a seed dev it will point to the seed's local fs_devices. In
	 * short, srcdev will have its correct fs_devices in both cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
		fs_devices->missing_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	mutex_lock(&uuid_mutex);

	btrfs_close_bdev(srcdev);
	synchronize_rcu();
	btrfs_free_device(srcdev);

	/* If there are no more devices, we'd rather delete the fs_devices */
	if (!fs_devices->num_devices) {
		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target is added to the sprout FS, so there will be no more
		 * devices left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

		list_del_init(&fs_devices->seed_list);
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	mutex_unlock(&uuid_mutex);
}

void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);

	btrfs_sysfs_remove_device(tgtdev);

	if (tgtdev->bdev)
		fs_devices->open_devices--;

	fs_devices->num_devices--;

	btrfs_assign_next_active_device(tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * The update_dev_time() within btrfs_scratch_superblocks()
	 * may lead to a call to btrfs_show_devname() which will try
	 * to hold device_list_mutex. And here this device
	 * is already out of the device list, so we don't have to hold
	 * the device_list_mutex lock.
	 */
	btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
				  tgtdev->name->str);

	btrfs_close_bdev(tgtdev);
	synchronize_rcu();
	btrfs_free_device(tgtdev);
}

static struct btrfs_device *btrfs_find_device_by_path(
		struct btrfs_fs_info *fs_info, const char *device_path)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct btrfs_device *device;

	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    fs_info->bdev_holder, 0, &bdev, &disk_super);
	if (ret)
		return ERR_PTR(ret);

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   disk_super->metadata_uuid, true);
	else
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   disk_super->fsid, true);

	btrfs_release_disk_super(disk_super);
	if (!device)
		device = ERR_PTR(-ENOENT);
	blkdev_put(bdev, FMODE_READ);
	return device;
}

/*
 * Lookup a device given by device id, or the path if the id is 0.
 */
struct btrfs_device *btrfs_find_device_by_devspec(
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
{
	struct btrfs_device *device;

	if (devid) {
		device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
					   NULL, true);
		if (!device)
			return ERR_PTR(-ENOENT);
		return device;
	}

	if (!device_path || !device_path[0])
		return ERR_PTR(-EINVAL);

	if (strcmp(device_path, "missing") == 0) {
		/* Find first missing device */
		list_for_each_entry(device, &fs_info->fs_devices->devices,
				    dev_list) {
			if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				     &device->dev_state) && !device->bdev)
				return device;
		}
		return ERR_PTR(-ENOENT);
	}

	return btrfs_find_device_by_path(fs_info, device_path);
}

/*
 * Does all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	lockdep_assert_held(&uuid_mutex);
	if (!fs_devices->seeding)
		return -EINVAL;

	/*
	 * Private copy of the seed devices, anchored at
	 * fs_info->fs_devices->seed_list
	 */
	seed_devices = alloc_fs_devices(NULL, NULL);
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	/*
	 * It's necessary to retain a copy of the original seed fs_devices in
	 * fs_uuids so that filesystems which have been seeded can successfully
	 * reference the seed device from open_seed_devices. This also supports
	 * multiple fs seeds.
	 */
	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}
	list_add(&old_devices->fs_list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	fs_devices->seeding = false;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = false;
	list_add(&seed_devices->seed_list, &fs_devices->seed_list);

	generate_random_uuid(fs_devices->fsid);
	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&fs_devices->device_list_mutex);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
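
/*
 * Editorial summary of the handoff above: the existing devices migrate to
 * seed_devices, the (now empty) fs_devices gets a freshly generated fsid
 * and drops the SEEDING flag, and a clone of the original seed list stays
 * registered in fs_uuids so that later scans can still resolve the seed.
 */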

/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_FSID_SIZE);
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   fs_uuid, true);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct super_block *sb = fs_info->sb;
	struct rcu_string *name;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 orig_super_total_bytes;
	u64 orig_super_num_devices;
	int seeding_dev = 0;
	int ret = 0;
	bool locked = false;

	if (sb_rdonly(sb) && !fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
		locked = true;
	}

	sync_blockdev(bdev);

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			rcu_read_unlock();
			goto error;
		}
	}
	rcu_read_unlock();

	device = btrfs_alloc_device(fs_info, NULL, NULL);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_KERNEL);
	if (!name) {
		ret = -ENOMEM;
		goto error_free_device;
	}
	rcu_assign_pointer(device->name, name);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error_free_device;
	}

	q = bdev_get_queue(bdev);
	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = trans->transid;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
					 fs_info->sectorsize);
	device->disk_total_bytes = device->total_bytes;
	device->commit_total_bytes = device->total_bytes;
	device->fs_info = fs_info;
	device->bdev = bdev;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);

	if (seeding_dev) {
		sb->s_flags &= ~SB_RDONLY;
		ret = btrfs_prepare_sprout(fs_info);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_trans;
		}
	}

	device->fs_devices = fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_add_rcu(&device->dev_list, &fs_devices->devices);
	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	fs_devices->rw_devices++;
	fs_devices->total_devices++;
	fs_devices->total_rw_bytes += device->total_bytes;

	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);

	if (!blk_queue_nonrot(q))
		fs_devices->rotating = true;

	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	btrfs_set_super_total_bytes(fs_info->super_copy,
		round_down(orig_super_total_bytes + device->total_bytes,
			   fs_info->sectorsize));

	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices + 1);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(fs_info);

	mutex_unlock(&fs_info->chunk_mutex);

	/* Add sysfs device entry */
	btrfs_sysfs_add_device(device);

	mutex_unlock(&fs_devices->device_list_mutex);

	if (seeding_dev) {
		mutex_lock(&fs_info->chunk_mutex);
		ret = init_first_rw_device(trans);
		mutex_unlock(&fs_info->chunk_mutex);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}
	}

	ret = btrfs_add_dev_item(trans, device);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto error_sysfs;
	}

	if (seeding_dev) {
		ret = btrfs_finish_sprout(trans);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}

		/*
		 * fs_devices now represents the newly sprouted filesystem and
		 * its fsid has been changed by btrfs_prepare_sprout
		 */
		btrfs_sysfs_update_sprout_fsid(fs_devices);
	}

	ret = btrfs_commit_transaction(trans);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
		locked = false;

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(fs_info);
		if (ret < 0)
			btrfs_handle_fs_error(fs_info, ret,
				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			ret = PTR_ERR(trans);
			trans = NULL;
			goto error_sysfs;
		}
		ret = btrfs_commit_transaction(trans);
	}
        /*
         * Now that we have written a new super block to this device, check all
         * other fs_devices lists to see whether device_path alienates any
         * other scanned device.
         * We can ignore the return value as it typically returns -EINVAL and
         * only succeeds if the device was an alien.
         */
        btrfs_forget_devices(device_path);
  2325. /* Update ctime/mtime for blkid or udev */
  2326. update_dev_time(device_path);
  2327. return ret;
  2328. error_sysfs:
  2329. btrfs_sysfs_remove_device(device);
  2330. mutex_lock(&fs_info->fs_devices->device_list_mutex);
  2331. mutex_lock(&fs_info->chunk_mutex);
  2332. list_del_rcu(&device->dev_list);
  2333. list_del(&device->dev_alloc_list);
  2334. fs_info->fs_devices->num_devices--;
  2335. fs_info->fs_devices->open_devices--;
  2336. fs_info->fs_devices->rw_devices--;
  2337. fs_info->fs_devices->total_devices--;
  2338. fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
  2339. atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
  2340. btrfs_set_super_total_bytes(fs_info->super_copy,
  2341. orig_super_total_bytes);
  2342. btrfs_set_super_num_devices(fs_info->super_copy,
  2343. orig_super_num_devices);
  2344. mutex_unlock(&fs_info->chunk_mutex);
  2345. mutex_unlock(&fs_info->fs_devices->device_list_mutex);
  2346. error_trans:
  2347. if (seeding_dev)
  2348. sb->s_flags |= SB_RDONLY;
  2349. if (trans)
  2350. btrfs_end_transaction(trans);
  2351. error_free_device:
  2352. btrfs_free_device(device);
  2353. error:
  2354. blkdev_put(bdev, FMODE_EXCL);
  2355. if (locked) {
  2356. mutex_unlock(&uuid_mutex);
  2357. up_write(&sb->s_umount);
  2358. }
  2359. return ret;
  2360. }
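/*
 * Write the in-memory state of @device (type, alignment, sizes, bytes used)
 * back into its dev item in the chunk tree.
 */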
  2361. static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
  2362. struct btrfs_device *device)
  2363. {
  2364. int ret;
  2365. struct btrfs_path *path;
  2366. struct btrfs_root *root = device->fs_info->chunk_root;
  2367. struct btrfs_dev_item *dev_item;
  2368. struct extent_buffer *leaf;
  2369. struct btrfs_key key;
  2370. path = btrfs_alloc_path();
  2371. if (!path)
  2372. return -ENOMEM;
  2373. key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
  2374. key.type = BTRFS_DEV_ITEM_KEY;
  2375. key.offset = device->devid;
  2376. ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
  2377. if (ret < 0)
  2378. goto out;
  2379. if (ret > 0) {
  2380. ret = -ENOENT;
  2381. goto out;
  2382. }
  2383. leaf = path->nodes[0];
  2384. dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
  2385. btrfs_set_device_id(leaf, dev_item, device->devid);
  2386. btrfs_set_device_type(leaf, dev_item, device->type);
  2387. btrfs_set_device_io_align(leaf, dev_item, device->io_align);
  2388. btrfs_set_device_io_width(leaf, dev_item, device->io_width);
  2389. btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
  2390. btrfs_set_device_total_bytes(leaf, dev_item,
  2391. btrfs_device_get_disk_total_bytes(device));
  2392. btrfs_set_device_bytes_used(leaf, dev_item,
  2393. btrfs_device_get_bytes_used(device));
  2394. btrfs_mark_buffer_dirty(leaf);
  2395. out:
  2396. btrfs_free_path(path);
  2397. return ret;
  2398. }
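/*
 * Grow @device to @new_size, rounded down to the sector size. Updates the
 * superblock total, the in-memory counters and the on-disk dev item; growing
 * a replace target or shrinking via this path is rejected with -EINVAL.
 */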
  2399. int btrfs_grow_device(struct btrfs_trans_handle *trans,
  2400. struct btrfs_device *device, u64 new_size)
  2401. {
  2402. struct btrfs_fs_info *fs_info = device->fs_info;
  2403. struct btrfs_super_block *super_copy = fs_info->super_copy;
  2404. u64 old_total;
  2405. u64 diff;
  2406. if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
  2407. return -EACCES;
  2408. new_size = round_down(new_size, fs_info->sectorsize);
  2409. mutex_lock(&fs_info->chunk_mutex);
  2410. old_total = btrfs_super_total_bytes(super_copy);
  2411. diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
  2412. if (new_size <= device->total_bytes ||
  2413. test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
  2414. mutex_unlock(&fs_info->chunk_mutex);
  2415. return -EINVAL;
  2416. }
  2417. btrfs_set_super_total_bytes(super_copy,
  2418. round_down(old_total + diff, fs_info->sectorsize));
  2419. device->fs_devices->total_rw_bytes += diff;
  2420. btrfs_device_set_total_bytes(device, new_size);
  2421. btrfs_device_set_disk_total_bytes(device, new_size);
  2422. btrfs_clear_space_info_full(device->fs_info);
  2423. if (list_empty(&device->post_commit_list))
  2424. list_add_tail(&device->post_commit_list,
  2425. &trans->transaction->dev_update_list);
  2426. mutex_unlock(&fs_info->chunk_mutex);
  2427. return btrfs_update_device(trans, device);
  2428. }
  2429. static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
  2430. {
  2431. struct btrfs_fs_info *fs_info = trans->fs_info;
  2432. struct btrfs_root *root = fs_info->chunk_root;
  2433. int ret;
  2434. struct btrfs_path *path;
  2435. struct btrfs_key key;
  2436. path = btrfs_alloc_path();
  2437. if (!path)
  2438. return -ENOMEM;
  2439. key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
  2440. key.offset = chunk_offset;
  2441. key.type = BTRFS_CHUNK_ITEM_KEY;
  2442. ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
  2443. if (ret < 0)
  2444. goto out;
  2445. else if (ret > 0) { /* Logic error or corruption */
  2446. btrfs_handle_fs_error(fs_info, -ENOENT,
  2447. "Failed lookup while freeing chunk.");
  2448. ret = -ENOENT;
  2449. goto out;
  2450. }
  2451. ret = btrfs_del_item(trans, root, path);
  2452. if (ret < 0)
  2453. btrfs_handle_fs_error(fs_info, ret,
  2454. "Failed to delete chunk item.");
  2455. out:
  2456. btrfs_free_path(path);
  2457. return ret;
  2458. }
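/*
 * Remove the chunk at @chunk_offset from the superblock's sys_chunk_array.
 * The array is a packed sequence of (struct btrfs_disk_key, struct
 * btrfs_chunk) pairs, so removal is a memmove over the matching pair.
 */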
  2459. static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
  2460. {
  2461. struct btrfs_super_block *super_copy = fs_info->super_copy;
  2462. struct btrfs_disk_key *disk_key;
  2463. struct btrfs_chunk *chunk;
  2464. u8 *ptr;
  2465. int ret = 0;
  2466. u32 num_stripes;
  2467. u32 array_size;
  2468. u32 len = 0;
  2469. u32 cur;
  2470. struct btrfs_key key;
  2471. mutex_lock(&fs_info->chunk_mutex);
  2472. array_size = btrfs_super_sys_array_size(super_copy);
  2473. ptr = super_copy->sys_chunk_array;
  2474. cur = 0;
  2475. while (cur < array_size) {
  2476. disk_key = (struct btrfs_disk_key *)ptr;
  2477. btrfs_disk_key_to_cpu(&key, disk_key);
  2478. len = sizeof(*disk_key);
  2479. if (key.type == BTRFS_CHUNK_ITEM_KEY) {
  2480. chunk = (struct btrfs_chunk *)(ptr + len);
  2481. num_stripes = btrfs_stack_chunk_num_stripes(chunk);
  2482. len += btrfs_chunk_item_size(num_stripes);
  2483. } else {
  2484. ret = -EIO;
  2485. break;
  2486. }
  2487. if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
  2488. key.offset == chunk_offset) {
  2489. memmove(ptr, ptr + len, array_size - (cur + len));
  2490. array_size -= len;
  2491. btrfs_set_super_sys_array_size(super_copy, array_size);
  2492. } else {
  2493. ptr += len;
  2494. cur += len;
  2495. }
  2496. }
  2497. mutex_unlock(&fs_info->chunk_mutex);
  2498. return ret;
  2499. }
/*
 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 * @fs_info: Filesystem to look the mapping up in.
 * @logical: Logical block offset in bytes.
 * @length:  Length of the extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
  2507. struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
  2508. u64 logical, u64 length)
  2509. {
  2510. struct extent_map_tree *em_tree;
  2511. struct extent_map *em;
  2512. em_tree = &fs_info->mapping_tree;
  2513. read_lock(&em_tree->lock);
  2514. em = lookup_extent_mapping(em_tree, logical, length);
  2515. read_unlock(&em_tree->lock);
  2516. if (!em) {
  2517. btrfs_crit(fs_info, "unable to find logical %llu length %llu",
  2518. logical, length);
  2519. return ERR_PTR(-EINVAL);
  2520. }
  2521. if (em->start > logical || em->start + em->len < logical) {
                btrfs_crit(fs_info,
                           "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
                           logical, logical + length, em->start,
                           em->start + em->len);
  2525. free_extent_map(em);
  2526. return ERR_PTR(-EINVAL);
  2527. }
  2528. /* callers are responsible for dropping em's ref. */
  2529. return em;
  2530. }
  2531. int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
  2532. {
  2533. struct btrfs_fs_info *fs_info = trans->fs_info;
  2534. struct extent_map *em;
  2535. struct map_lookup *map;
  2536. u64 dev_extent_len = 0;
  2537. int i, ret = 0;
  2538. struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
  2539. em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
  2540. if (IS_ERR(em)) {
  2541. /*
  2542. * This is a logic error, but we don't want to just rely on the
  2543. * user having built with ASSERT enabled, so if ASSERT doesn't
  2544. * do anything we still error out.
  2545. */
  2546. ASSERT(0);
  2547. return PTR_ERR(em);
  2548. }
  2549. map = em->map_lookup;
  2550. mutex_lock(&fs_info->chunk_mutex);
  2551. check_system_chunk(trans, map->type);
  2552. mutex_unlock(&fs_info->chunk_mutex);
  2553. /*
  2554. * Take the device list mutex to prevent races with the final phase of
  2555. * a device replace operation that replaces the device object associated
  2556. * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
  2557. */
  2558. mutex_lock(&fs_devices->device_list_mutex);
  2559. for (i = 0; i < map->num_stripes; i++) {
  2560. struct btrfs_device *device = map->stripes[i].dev;
  2561. ret = btrfs_free_dev_extent(trans, device,
  2562. map->stripes[i].physical,
  2563. &dev_extent_len);
  2564. if (ret) {
  2565. mutex_unlock(&fs_devices->device_list_mutex);
  2566. btrfs_abort_transaction(trans, ret);
  2567. goto out;
  2568. }
  2569. if (device->bytes_used > 0) {
  2570. mutex_lock(&fs_info->chunk_mutex);
  2571. btrfs_device_set_bytes_used(device,
  2572. device->bytes_used - dev_extent_len);
  2573. atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
  2574. btrfs_clear_space_info_full(fs_info);
  2575. mutex_unlock(&fs_info->chunk_mutex);
  2576. }
  2577. ret = btrfs_update_device(trans, device);
  2578. if (ret) {
  2579. mutex_unlock(&fs_devices->device_list_mutex);
  2580. btrfs_abort_transaction(trans, ret);
  2581. goto out;
  2582. }
  2583. }
  2584. mutex_unlock(&fs_devices->device_list_mutex);
  2585. ret = btrfs_free_chunk(trans, chunk_offset);
  2586. if (ret) {
  2587. btrfs_abort_transaction(trans, ret);
  2588. goto out;
  2589. }
  2590. trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
  2591. if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
  2592. ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
  2593. if (ret) {
  2594. btrfs_abort_transaction(trans, ret);
  2595. goto out;
  2596. }
  2597. }
  2598. ret = btrfs_remove_block_group(trans, chunk_offset, em);
  2599. if (ret) {
  2600. btrfs_abort_transaction(trans, ret);
  2601. goto out;
  2602. }
  2603. out:
  2604. /* once for us */
  2605. free_extent_map(em);
  2606. return ret;
  2607. }
  2608. static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
  2609. {
  2610. struct btrfs_root *root = fs_info->chunk_root;
  2611. struct btrfs_trans_handle *trans;
  2612. struct btrfs_block_group *block_group;
  2613. int ret;
  2614. /*
  2615. * Prevent races with automatic removal of unused block groups.
  2616. * After we relocate and before we remove the chunk with offset
  2617. * chunk_offset, automatic removal of the block group can kick in,
  2618. * resulting in a failure when calling btrfs_remove_chunk() below.
  2619. *
  2620. * Make sure to acquire this mutex before doing a tree search (dev
  2621. * or chunk trees) to find chunks. Otherwise the cleaner kthread might
  2622. * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
  2623. * we release the path used to search the chunk/dev tree and before
  2624. * the current task acquires this mutex and calls us.
  2625. */
  2626. lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
  2627. /* step one, relocate all the extents inside this chunk */
  2628. btrfs_scrub_pause(fs_info);
  2629. ret = btrfs_relocate_block_group(fs_info, chunk_offset);
  2630. btrfs_scrub_continue(fs_info);
  2631. if (ret)
  2632. return ret;
  2633. block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
  2634. if (!block_group)
  2635. return -ENOENT;
  2636. btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
  2637. btrfs_put_block_group(block_group);
  2638. trans = btrfs_start_trans_remove_block_group(root->fs_info,
  2639. chunk_offset);
  2640. if (IS_ERR(trans)) {
  2641. ret = PTR_ERR(trans);
  2642. btrfs_handle_fs_error(root->fs_info, ret, NULL);
  2643. return ret;
  2644. }
  2645. /*
  2646. * step two, delete the device extents and the
  2647. * chunk tree entries
  2648. */
  2649. ret = btrfs_remove_chunk(trans, chunk_offset);
  2650. btrfs_end_transaction(trans);
  2651. return ret;
  2652. }
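/*
 * Relocate all SYSTEM chunks, walking the chunk tree from the highest offset
 * downwards. Chunks failing with -ENOSPC are counted and the walk is retried
 * once; if any still fail on the second pass, return -ENOSPC.
 */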
  2653. static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
  2654. {
  2655. struct btrfs_root *chunk_root = fs_info->chunk_root;
  2656. struct btrfs_path *path;
  2657. struct extent_buffer *leaf;
  2658. struct btrfs_chunk *chunk;
  2659. struct btrfs_key key;
  2660. struct btrfs_key found_key;
  2661. u64 chunk_type;
  2662. bool retried = false;
  2663. int failed = 0;
  2664. int ret;
  2665. path = btrfs_alloc_path();
  2666. if (!path)
  2667. return -ENOMEM;
  2668. again:
  2669. key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
  2670. key.offset = (u64)-1;
  2671. key.type = BTRFS_CHUNK_ITEM_KEY;
  2672. while (1) {
  2673. mutex_lock(&fs_info->delete_unused_bgs_mutex);
  2674. ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
  2675. if (ret < 0) {
  2676. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  2677. goto error;
  2678. }
  2679. BUG_ON(ret == 0); /* Corruption */
  2680. ret = btrfs_previous_item(chunk_root, path, key.objectid,
  2681. key.type);
  2682. if (ret)
  2683. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  2684. if (ret < 0)
  2685. goto error;
  2686. if (ret > 0)
  2687. break;
  2688. leaf = path->nodes[0];
  2689. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  2690. chunk = btrfs_item_ptr(leaf, path->slots[0],
  2691. struct btrfs_chunk);
  2692. chunk_type = btrfs_chunk_type(leaf, chunk);
  2693. btrfs_release_path(path);
  2694. if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
  2695. ret = btrfs_relocate_chunk(fs_info, found_key.offset);
  2696. if (ret == -ENOSPC)
  2697. failed++;
  2698. else
  2699. BUG_ON(ret);
  2700. }
  2701. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  2702. if (found_key.offset == 0)
  2703. break;
  2704. key.offset = found_key.offset - 1;
  2705. }
  2706. ret = 0;
  2707. if (failed && !retried) {
  2708. failed = 0;
  2709. retried = true;
  2710. goto again;
  2711. } else if (WARN_ON(failed && retried)) {
  2712. ret = -ENOSPC;
  2713. }
  2714. error:
  2715. btrfs_free_path(path);
  2716. return ret;
  2717. }
/*
 * Return 1 : a data chunk was allocated successfully,
 * return <0: an error occurred while allocating a data chunk,
 * return 0 : there was no need to allocate a data chunk.
 */
  2723. static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
  2724. u64 chunk_offset)
  2725. {
  2726. struct btrfs_block_group *cache;
  2727. u64 bytes_used;
  2728. u64 chunk_type;
  2729. cache = btrfs_lookup_block_group(fs_info, chunk_offset);
  2730. ASSERT(cache);
  2731. chunk_type = cache->flags;
  2732. btrfs_put_block_group(cache);
  2733. if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
  2734. return 0;
  2735. spin_lock(&fs_info->data_sinfo->lock);
  2736. bytes_used = fs_info->data_sinfo->bytes_used;
  2737. spin_unlock(&fs_info->data_sinfo->lock);
  2738. if (!bytes_used) {
  2739. struct btrfs_trans_handle *trans;
  2740. int ret;
  2741. trans = btrfs_join_transaction(fs_info->tree_root);
  2742. if (IS_ERR(trans))
  2743. return PTR_ERR(trans);
  2744. ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
  2745. btrfs_end_transaction(trans);
  2746. if (ret < 0)
  2747. return ret;
  2748. return 1;
  2749. }
  2750. return 0;
  2751. }
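/*
 * Persist the balance control @bctl as the BTRFS_BALANCE_OBJECTID item in the
 * tree root; this is what lets an interrupted balance be resumed on the next
 * mount (see btrfs_recover_balance()).
 */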
  2752. static int insert_balance_item(struct btrfs_fs_info *fs_info,
  2753. struct btrfs_balance_control *bctl)
  2754. {
  2755. struct btrfs_root *root = fs_info->tree_root;
  2756. struct btrfs_trans_handle *trans;
  2757. struct btrfs_balance_item *item;
  2758. struct btrfs_disk_balance_args disk_bargs;
  2759. struct btrfs_path *path;
  2760. struct extent_buffer *leaf;
  2761. struct btrfs_key key;
  2762. int ret, err;
  2763. path = btrfs_alloc_path();
  2764. if (!path)
  2765. return -ENOMEM;
  2766. trans = btrfs_start_transaction(root, 0);
  2767. if (IS_ERR(trans)) {
  2768. btrfs_free_path(path);
  2769. return PTR_ERR(trans);
  2770. }
  2771. key.objectid = BTRFS_BALANCE_OBJECTID;
  2772. key.type = BTRFS_TEMPORARY_ITEM_KEY;
  2773. key.offset = 0;
  2774. ret = btrfs_insert_empty_item(trans, root, path, &key,
  2775. sizeof(*item));
  2776. if (ret)
  2777. goto out;
  2778. leaf = path->nodes[0];
  2779. item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
  2780. memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
  2781. btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
  2782. btrfs_set_balance_data(leaf, item, &disk_bargs);
  2783. btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
  2784. btrfs_set_balance_meta(leaf, item, &disk_bargs);
  2785. btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
  2786. btrfs_set_balance_sys(leaf, item, &disk_bargs);
  2787. btrfs_set_balance_flags(leaf, item, bctl->flags);
  2788. btrfs_mark_buffer_dirty(leaf);
  2789. out:
  2790. btrfs_free_path(path);
  2791. err = btrfs_commit_transaction(trans);
  2792. if (err && !ret)
  2793. ret = err;
  2794. return ret;
  2795. }
  2796. static int del_balance_item(struct btrfs_fs_info *fs_info)
  2797. {
  2798. struct btrfs_root *root = fs_info->tree_root;
  2799. struct btrfs_trans_handle *trans;
  2800. struct btrfs_path *path;
  2801. struct btrfs_key key;
  2802. int ret, err;
  2803. path = btrfs_alloc_path();
  2804. if (!path)
  2805. return -ENOMEM;
  2806. trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
  2807. if (IS_ERR(trans)) {
  2808. btrfs_free_path(path);
  2809. return PTR_ERR(trans);
  2810. }
  2811. key.objectid = BTRFS_BALANCE_OBJECTID;
  2812. key.type = BTRFS_TEMPORARY_ITEM_KEY;
  2813. key.offset = 0;
  2814. ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
  2815. if (ret < 0)
  2816. goto out;
  2817. if (ret > 0) {
  2818. ret = -ENOENT;
  2819. goto out;
  2820. }
  2821. ret = btrfs_del_item(trans, root, path);
  2822. out:
  2823. btrfs_free_path(path);
  2824. err = btrfs_commit_transaction(trans);
  2825. if (err && !ret)
  2826. ret = err;
  2827. return ret;
  2828. }
  2829. /*
  2830. * This is a heuristic used to reduce the number of chunks balanced on
  2831. * resume after balance was interrupted.
  2832. */
  2833. static void update_balance_args(struct btrfs_balance_control *bctl)
  2834. {
  2835. /*
  2836. * Turn on soft mode for chunk types that were being converted.
  2837. */
  2838. if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
  2839. bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
  2840. if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
  2841. bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
  2842. if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
  2843. bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
        /*
         * Turn on the usage filter if it is not already in use. The idea is
         * that chunks that we have already balanced should be
         * reasonably full. Don't do it for chunks that are being
         * converted - that would keep us from relocating unconverted
         * (albeit full) chunks.
         */
  2851. if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
  2852. !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
  2853. !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
  2854. bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
  2855. bctl->data.usage = 90;
  2856. }
  2857. if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
  2858. !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
  2859. !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
  2860. bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
  2861. bctl->sys.usage = 90;
  2862. }
  2863. if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
  2864. !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
  2865. !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
  2866. bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
  2867. bctl->meta.usage = 90;
  2868. }
  2869. }
  2870. /*
  2871. * Clear the balance status in fs_info and delete the balance item from disk.
  2872. */
  2873. static void reset_balance_state(struct btrfs_fs_info *fs_info)
  2874. {
  2875. struct btrfs_balance_control *bctl = fs_info->balance_ctl;
  2876. int ret;
  2877. BUG_ON(!fs_info->balance_ctl);
  2878. spin_lock(&fs_info->balance_lock);
  2879. fs_info->balance_ctl = NULL;
  2880. spin_unlock(&fs_info->balance_lock);
  2881. kfree(bctl);
  2882. ret = del_balance_item(fs_info);
  2883. if (ret)
  2884. btrfs_handle_fs_error(fs_info, ret, NULL);
  2885. }
  2886. /*
  2887. * Balance filters. Return 1 if chunk should be filtered out
  2888. * (should not be balanced).
  2889. */
  2890. static int chunk_profiles_filter(u64 chunk_type,
  2891. struct btrfs_balance_args *bargs)
  2892. {
  2893. chunk_type = chunk_to_extended(chunk_type) &
  2894. BTRFS_EXTENDED_PROFILE_MASK;
  2895. if (bargs->profiles & chunk_type)
  2896. return 0;
  2897. return 1;
  2898. }
  2899. static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
  2900. struct btrfs_balance_args *bargs)
  2901. {
  2902. struct btrfs_block_group *cache;
  2903. u64 chunk_used;
  2904. u64 user_thresh_min;
  2905. u64 user_thresh_max;
  2906. int ret = 1;
  2907. cache = btrfs_lookup_block_group(fs_info, chunk_offset);
  2908. chunk_used = cache->used;
  2909. if (bargs->usage_min == 0)
  2910. user_thresh_min = 0;
  2911. else
  2912. user_thresh_min = div_factor_fine(cache->length,
  2913. bargs->usage_min);
  2914. if (bargs->usage_max == 0)
  2915. user_thresh_max = 1;
  2916. else if (bargs->usage_max > 100)
  2917. user_thresh_max = cache->length;
  2918. else
  2919. user_thresh_max = div_factor_fine(cache->length,
  2920. bargs->usage_max);
  2921. if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
  2922. ret = 0;
  2923. btrfs_put_block_group(cache);
  2924. return ret;
  2925. }
  2926. static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
  2927. u64 chunk_offset, struct btrfs_balance_args *bargs)
  2928. {
  2929. struct btrfs_block_group *cache;
  2930. u64 chunk_used, user_thresh;
  2931. int ret = 1;
  2932. cache = btrfs_lookup_block_group(fs_info, chunk_offset);
  2933. chunk_used = cache->used;
  2934. if (bargs->usage_min == 0)
  2935. user_thresh = 1;
  2936. else if (bargs->usage > 100)
  2937. user_thresh = cache->length;
  2938. else
  2939. user_thresh = div_factor_fine(cache->length, bargs->usage);
  2940. if (chunk_used < user_thresh)
  2941. ret = 0;
  2942. btrfs_put_block_group(cache);
  2943. return ret;
  2944. }
  2945. static int chunk_devid_filter(struct extent_buffer *leaf,
  2946. struct btrfs_chunk *chunk,
  2947. struct btrfs_balance_args *bargs)
  2948. {
  2949. struct btrfs_stripe *stripe;
  2950. int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
  2951. int i;
  2952. for (i = 0; i < num_stripes; i++) {
  2953. stripe = btrfs_stripe_nr(chunk, i);
  2954. if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
  2955. return 0;
  2956. }
  2957. return 1;
  2958. }
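/*
 * Number of stripes carrying data for a chunk of the given type: e.g. a
 * RAID5 chunk with N stripes has N - 1 data stripes (one holds parity),
 * while a 2-stripe RAID1 chunk has 1 (both stripes hold the same data).
 */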
  2959. static u64 calc_data_stripes(u64 type, int num_stripes)
  2960. {
  2961. const int index = btrfs_bg_flags_to_raid_index(type);
  2962. const int ncopies = btrfs_raid_array[index].ncopies;
  2963. const int nparity = btrfs_raid_array[index].nparity;
  2964. if (nparity)
  2965. return num_stripes - nparity;
  2966. else
  2967. return num_stripes / ncopies;
  2968. }
  2969. /* [pstart, pend) */
  2970. static int chunk_drange_filter(struct extent_buffer *leaf,
  2971. struct btrfs_chunk *chunk,
  2972. struct btrfs_balance_args *bargs)
  2973. {
  2974. struct btrfs_stripe *stripe;
  2975. int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
  2976. u64 stripe_offset;
  2977. u64 stripe_length;
  2978. u64 type;
  2979. int factor;
  2980. int i;
  2981. if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
  2982. return 0;
  2983. type = btrfs_chunk_type(leaf, chunk);
  2984. factor = calc_data_stripes(type, num_stripes);
  2985. for (i = 0; i < num_stripes; i++) {
  2986. stripe = btrfs_stripe_nr(chunk, i);
  2987. if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
  2988. continue;
  2989. stripe_offset = btrfs_stripe_offset(leaf, stripe);
  2990. stripe_length = btrfs_chunk_length(leaf, chunk);
  2991. stripe_length = div_u64(stripe_length, factor);
  2992. if (stripe_offset < bargs->pend &&
  2993. stripe_offset + stripe_length > bargs->pstart)
  2994. return 0;
  2995. }
  2996. return 1;
  2997. }
  2998. /* [vstart, vend) */
  2999. static int chunk_vrange_filter(struct extent_buffer *leaf,
  3000. struct btrfs_chunk *chunk,
  3001. u64 chunk_offset,
  3002. struct btrfs_balance_args *bargs)
  3003. {
  3004. if (chunk_offset < bargs->vend &&
  3005. chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
  3006. /* at least part of the chunk is inside this vrange */
  3007. return 0;
  3008. return 1;
  3009. }
  3010. static int chunk_stripes_range_filter(struct extent_buffer *leaf,
  3011. struct btrfs_chunk *chunk,
  3012. struct btrfs_balance_args *bargs)
  3013. {
  3014. int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
  3015. if (bargs->stripes_min <= num_stripes
  3016. && num_stripes <= bargs->stripes_max)
  3017. return 0;
  3018. return 1;
  3019. }
  3020. static int chunk_soft_convert_filter(u64 chunk_type,
  3021. struct btrfs_balance_args *bargs)
  3022. {
  3023. if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
  3024. return 0;
  3025. chunk_type = chunk_to_extended(chunk_type) &
  3026. BTRFS_EXTENDED_PROFILE_MASK;
  3027. if (bargs->target == chunk_type)
  3028. return 1;
  3029. return 0;
  3030. }
  3031. static int should_balance_chunk(struct extent_buffer *leaf,
  3032. struct btrfs_chunk *chunk, u64 chunk_offset)
  3033. {
  3034. struct btrfs_fs_info *fs_info = leaf->fs_info;
  3035. struct btrfs_balance_control *bctl = fs_info->balance_ctl;
  3036. struct btrfs_balance_args *bargs = NULL;
  3037. u64 chunk_type = btrfs_chunk_type(leaf, chunk);
  3038. /* type filter */
  3039. if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
  3040. (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
  3041. return 0;
  3042. }
  3043. if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
  3044. bargs = &bctl->data;
  3045. else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
  3046. bargs = &bctl->sys;
  3047. else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
  3048. bargs = &bctl->meta;
  3049. /* profiles filter */
  3050. if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
  3051. chunk_profiles_filter(chunk_type, bargs)) {
  3052. return 0;
  3053. }
  3054. /* usage filter */
  3055. if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
  3056. chunk_usage_filter(fs_info, chunk_offset, bargs)) {
  3057. return 0;
  3058. } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
  3059. chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
  3060. return 0;
  3061. }
  3062. /* devid filter */
  3063. if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
  3064. chunk_devid_filter(leaf, chunk, bargs)) {
  3065. return 0;
  3066. }
  3067. /* drange filter, makes sense only with devid filter */
  3068. if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
  3069. chunk_drange_filter(leaf, chunk, bargs)) {
  3070. return 0;
  3071. }
  3072. /* vrange filter */
  3073. if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
  3074. chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
  3075. return 0;
  3076. }
  3077. /* stripes filter */
  3078. if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
  3079. chunk_stripes_range_filter(leaf, chunk, bargs)) {
  3080. return 0;
  3081. }
  3082. /* soft profile changing mode */
  3083. if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
  3084. chunk_soft_convert_filter(chunk_type, bargs)) {
  3085. return 0;
  3086. }
  3087. /*
  3088. * limited by count, must be the last filter
  3089. */
  3090. if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
  3091. if (bargs->limit == 0)
  3092. return 0;
  3093. else
  3094. bargs->limit--;
  3095. } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
  3096. /*
  3097. * Same logic as the 'limit' filter; the minimum cannot be
  3098. * determined here because we do not have the global information
  3099. * about the count of all chunks that satisfy the filters.
  3100. */
  3101. if (bargs->limit_max == 0)
  3102. return 0;
  3103. else
  3104. bargs->limit_max--;
  3105. }
  3106. return 1;
  3107. }
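/*
 * Main balance loop. Walks the chunk tree from the highest offset downwards
 * in two passes: a counting pass that only tallies the chunks matching the
 * filters (bctl->stat.expected), then a second pass that relocates them.
 */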
  3108. static int __btrfs_balance(struct btrfs_fs_info *fs_info)
  3109. {
  3110. struct btrfs_balance_control *bctl = fs_info->balance_ctl;
  3111. struct btrfs_root *chunk_root = fs_info->chunk_root;
  3112. u64 chunk_type;
  3113. struct btrfs_chunk *chunk;
  3114. struct btrfs_path *path = NULL;
  3115. struct btrfs_key key;
  3116. struct btrfs_key found_key;
  3117. struct extent_buffer *leaf;
  3118. int slot;
  3119. int ret;
  3120. int enospc_errors = 0;
  3121. bool counting = true;
        /*
         * The single value limit and the min/max limits use the same bytes in
         * struct btrfs_balance_args (the limit field is a union), so saving
         * ->limit here preserves both forms for the second pass.
         */
        u64 limit_data = bctl->data.limit;
        u64 limit_meta = bctl->meta.limit;
        u64 limit_sys = bctl->sys.limit;
  3126. u32 count_data = 0;
  3127. u32 count_meta = 0;
  3128. u32 count_sys = 0;
  3129. int chunk_reserved = 0;
  3130. path = btrfs_alloc_path();
  3131. if (!path) {
  3132. ret = -ENOMEM;
  3133. goto error;
  3134. }
  3135. /* zero out stat counters */
  3136. spin_lock(&fs_info->balance_lock);
  3137. memset(&bctl->stat, 0, sizeof(bctl->stat));
  3138. spin_unlock(&fs_info->balance_lock);
  3139. again:
        if (!counting) {
                /*
                 * The single value limit and the min/max limits use the same
                 * bytes in the balance args union, so restoring ->limit brings
                 * back both forms after the counting pass consumed them.
                 */
                bctl->data.limit = limit_data;
                bctl->meta.limit = limit_meta;
                bctl->sys.limit = limit_sys;
        }
  3149. key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
  3150. key.offset = (u64)-1;
  3151. key.type = BTRFS_CHUNK_ITEM_KEY;
  3152. while (1) {
  3153. if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
  3154. atomic_read(&fs_info->balance_cancel_req)) {
  3155. ret = -ECANCELED;
  3156. goto error;
  3157. }
  3158. mutex_lock(&fs_info->delete_unused_bgs_mutex);
  3159. ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
  3160. if (ret < 0) {
  3161. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  3162. goto error;
  3163. }
  3164. /*
  3165. * this shouldn't happen, it means the last relocate
  3166. * failed
  3167. */
  3168. if (ret == 0)
  3169. BUG(); /* FIXME break ? */
  3170. ret = btrfs_previous_item(chunk_root, path, 0,
  3171. BTRFS_CHUNK_ITEM_KEY);
  3172. if (ret) {
  3173. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  3174. ret = 0;
  3175. break;
  3176. }
  3177. leaf = path->nodes[0];
  3178. slot = path->slots[0];
  3179. btrfs_item_key_to_cpu(leaf, &found_key, slot);
  3180. if (found_key.objectid != key.objectid) {
  3181. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  3182. break;
  3183. }
  3184. chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
  3185. chunk_type = btrfs_chunk_type(leaf, chunk);
  3186. if (!counting) {
  3187. spin_lock(&fs_info->balance_lock);
  3188. bctl->stat.considered++;
  3189. spin_unlock(&fs_info->balance_lock);
  3190. }
  3191. ret = should_balance_chunk(leaf, chunk, found_key.offset);
  3192. btrfs_release_path(path);
  3193. if (!ret) {
  3194. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  3195. goto loop;
  3196. }
  3197. if (counting) {
  3198. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  3199. spin_lock(&fs_info->balance_lock);
  3200. bctl->stat.expected++;
  3201. spin_unlock(&fs_info->balance_lock);
  3202. if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
  3203. count_data++;
  3204. else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
  3205. count_sys++;
  3206. else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
  3207. count_meta++;
  3208. goto loop;
  3209. }
  3210. /*
  3211. * Apply limit_min filter, no need to check if the LIMITS
  3212. * filter is used, limit_min is 0 by default
  3213. */
  3214. if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
  3215. count_data < bctl->data.limit_min)
  3216. || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
  3217. count_meta < bctl->meta.limit_min)
  3218. || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
  3219. count_sys < bctl->sys.limit_min)) {
  3220. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  3221. goto loop;
  3222. }
  3223. if (!chunk_reserved) {
  3224. /*
  3225. * We may be relocating the only data chunk we have,
  3226. * which could potentially end up with losing data's
  3227. * raid profile, so lets allocate an empty one in
  3228. * advance.
  3229. */
  3230. ret = btrfs_may_alloc_data_chunk(fs_info,
  3231. found_key.offset);
  3232. if (ret < 0) {
  3233. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  3234. goto error;
  3235. } else if (ret == 1) {
  3236. chunk_reserved = 1;
  3237. }
  3238. }
  3239. ret = btrfs_relocate_chunk(fs_info, found_key.offset);
  3240. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  3241. if (ret == -ENOSPC) {
  3242. enospc_errors++;
  3243. } else if (ret == -ETXTBSY) {
  3244. btrfs_info(fs_info,
  3245. "skipping relocation of block group %llu due to active swapfile",
  3246. found_key.offset);
  3247. ret = 0;
  3248. } else if (ret) {
  3249. goto error;
  3250. } else {
  3251. spin_lock(&fs_info->balance_lock);
  3252. bctl->stat.completed++;
  3253. spin_unlock(&fs_info->balance_lock);
  3254. }
  3255. loop:
  3256. if (found_key.offset == 0)
  3257. break;
  3258. key.offset = found_key.offset - 1;
  3259. }
  3260. if (counting) {
  3261. btrfs_release_path(path);
  3262. counting = false;
  3263. goto again;
  3264. }
  3265. error:
  3266. btrfs_free_path(path);
  3267. if (enospc_errors) {
  3268. btrfs_info(fs_info, "%d enospc errors during balance",
  3269. enospc_errors);
  3270. if (!ret)
  3271. ret = -ENOSPC;
  3272. }
  3273. return ret;
  3274. }
  3275. /**
  3276. * alloc_profile_is_valid - see if a given profile is valid and reduced
  3277. * @flags: profile to validate
  3278. * @extended: if true @flags is treated as an extended profile
  3279. */
  3280. static int alloc_profile_is_valid(u64 flags, int extended)
  3281. {
  3282. u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
  3283. BTRFS_BLOCK_GROUP_PROFILE_MASK);
  3284. flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
  3285. /* 1) check that all other bits are zeroed */
  3286. if (flags & ~mask)
  3287. return 0;
  3288. /* 2) see if profile is reduced */
  3289. if (flags == 0)
  3290. return !extended; /* "0" is valid for usual profiles */
  3291. return has_single_bit_set(flags);
  3292. }
  3293. static inline int balance_need_close(struct btrfs_fs_info *fs_info)
  3294. {
  3295. /* cancel requested || normal exit path */
  3296. return atomic_read(&fs_info->balance_cancel_req) ||
  3297. (atomic_read(&fs_info->balance_pause_req) == 0 &&
  3298. atomic_read(&fs_info->balance_cancel_req) == 0);
  3299. }
  3300. /*
  3301. * Validate target profile against allowed profiles and return true if it's OK.
  3302. * Otherwise print the error message and return false.
  3303. */
  3304. static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
  3305. const struct btrfs_balance_args *bargs,
  3306. u64 allowed, const char *type)
  3307. {
  3308. if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
  3309. return true;
  3310. /* Profile is valid and does not have bits outside of the allowed set */
  3311. if (alloc_profile_is_valid(bargs->target, 1) &&
  3312. (bargs->target & ~allowed) == 0)
  3313. return true;
  3314. btrfs_err(fs_info, "balance: invalid convert %s profile %s",
  3315. type, btrfs_bg_type_to_raid_name(bargs->target));
  3316. return false;
  3317. }
  3318. /*
  3319. * Fill @buf with textual description of balance filter flags @bargs, up to
  3320. * @size_buf including the terminating null. The output may be trimmed if it
  3321. * does not fit into the provided buffer.
  3322. */
  3323. static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
  3324. u32 size_buf)
  3325. {
  3326. int ret;
  3327. u32 size_bp = size_buf;
  3328. char *bp = buf;
  3329. u64 flags = bargs->flags;
  3330. char tmp_buf[128] = {'\0'};
  3331. if (!flags)
  3332. return;
  3333. #define CHECK_APPEND_NOARG(a) \
  3334. do { \
  3335. ret = snprintf(bp, size_bp, (a)); \
  3336. if (ret < 0 || ret >= size_bp) \
  3337. goto out_overflow; \
  3338. size_bp -= ret; \
  3339. bp += ret; \
  3340. } while (0)
  3341. #define CHECK_APPEND_1ARG(a, v1) \
  3342. do { \
  3343. ret = snprintf(bp, size_bp, (a), (v1)); \
  3344. if (ret < 0 || ret >= size_bp) \
  3345. goto out_overflow; \
  3346. size_bp -= ret; \
  3347. bp += ret; \
  3348. } while (0)
  3349. #define CHECK_APPEND_2ARG(a, v1, v2) \
  3350. do { \
  3351. ret = snprintf(bp, size_bp, (a), (v1), (v2)); \
  3352. if (ret < 0 || ret >= size_bp) \
  3353. goto out_overflow; \
  3354. size_bp -= ret; \
  3355. bp += ret; \
  3356. } while (0)
  3357. if (flags & BTRFS_BALANCE_ARGS_CONVERT)
  3358. CHECK_APPEND_1ARG("convert=%s,",
  3359. btrfs_bg_type_to_raid_name(bargs->target));
  3360. if (flags & BTRFS_BALANCE_ARGS_SOFT)
  3361. CHECK_APPEND_NOARG("soft,");
  3362. if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
  3363. btrfs_describe_block_groups(bargs->profiles, tmp_buf,
  3364. sizeof(tmp_buf));
  3365. CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
  3366. }
  3367. if (flags & BTRFS_BALANCE_ARGS_USAGE)
  3368. CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
  3369. if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
  3370. CHECK_APPEND_2ARG("usage=%u..%u,",
  3371. bargs->usage_min, bargs->usage_max);
  3372. if (flags & BTRFS_BALANCE_ARGS_DEVID)
  3373. CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
  3374. if (flags & BTRFS_BALANCE_ARGS_DRANGE)
  3375. CHECK_APPEND_2ARG("drange=%llu..%llu,",
  3376. bargs->pstart, bargs->pend);
  3377. if (flags & BTRFS_BALANCE_ARGS_VRANGE)
  3378. CHECK_APPEND_2ARG("vrange=%llu..%llu,",
  3379. bargs->vstart, bargs->vend);
  3380. if (flags & BTRFS_BALANCE_ARGS_LIMIT)
  3381. CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
  3382. if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
  3383. CHECK_APPEND_2ARG("limit=%u..%u,",
  3384. bargs->limit_min, bargs->limit_max);
  3385. if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
  3386. CHECK_APPEND_2ARG("stripes=%u..%u,",
  3387. bargs->stripes_min, bargs->stripes_max);
  3388. #undef CHECK_APPEND_2ARG
  3389. #undef CHECK_APPEND_1ARG
  3390. #undef CHECK_APPEND_NOARG
  3391. out_overflow:
  3392. if (size_bp < size_buf)
  3393. buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
  3394. else
  3395. buf[0] = '\0';
  3396. }
  3397. static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
  3398. {
  3399. u32 size_buf = 1024;
  3400. char tmp_buf[192] = {'\0'};
  3401. char *buf;
  3402. char *bp;
  3403. u32 size_bp = size_buf;
  3404. int ret;
  3405. struct btrfs_balance_control *bctl = fs_info->balance_ctl;
  3406. buf = kzalloc(size_buf, GFP_KERNEL);
  3407. if (!buf)
  3408. return;
  3409. bp = buf;
  3410. #define CHECK_APPEND_1ARG(a, v1) \
  3411. do { \
  3412. ret = snprintf(bp, size_bp, (a), (v1)); \
  3413. if (ret < 0 || ret >= size_bp) \
  3414. goto out_overflow; \
  3415. size_bp -= ret; \
  3416. bp += ret; \
  3417. } while (0)
  3418. if (bctl->flags & BTRFS_BALANCE_FORCE)
  3419. CHECK_APPEND_1ARG("%s", "-f ");
  3420. if (bctl->flags & BTRFS_BALANCE_DATA) {
  3421. describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
  3422. CHECK_APPEND_1ARG("-d%s ", tmp_buf);
  3423. }
  3424. if (bctl->flags & BTRFS_BALANCE_METADATA) {
  3425. describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
  3426. CHECK_APPEND_1ARG("-m%s ", tmp_buf);
  3427. }
  3428. if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
  3429. describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
  3430. CHECK_APPEND_1ARG("-s%s ", tmp_buf);
  3431. }
  3432. #undef CHECK_APPEND_1ARG
  3433. out_overflow:
  3434. if (size_bp < size_buf)
  3435. buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
  3436. btrfs_info(fs_info, "balance: %s %s",
  3437. (bctl->flags & BTRFS_BALANCE_RESUME) ?
  3438. "resume" : "start", buf);
  3439. kfree(buf);
  3440. }
/*
 * Start or resume the balance operation described by @bctl. Should be called
 * with the balance mutex held.
 */
  3444. int btrfs_balance(struct btrfs_fs_info *fs_info,
  3445. struct btrfs_balance_control *bctl,
  3446. struct btrfs_ioctl_balance_args *bargs)
  3447. {
  3448. u64 meta_target, data_target;
  3449. u64 allowed;
  3450. int mixed = 0;
  3451. int ret;
  3452. u64 num_devices;
  3453. unsigned seq;
  3454. bool reducing_redundancy;
  3455. int i;
  3456. if (btrfs_fs_closing(fs_info) ||
  3457. atomic_read(&fs_info->balance_pause_req) ||
  3458. btrfs_should_cancel_balance(fs_info)) {
  3459. ret = -EINVAL;
  3460. goto out;
  3461. }
  3462. allowed = btrfs_super_incompat_flags(fs_info->super_copy);
  3463. if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
  3464. mixed = 1;
  3465. /*
  3466. * In case of mixed groups both data and meta should be picked,
  3467. * and identical options should be given for both of them.
  3468. */
  3469. allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
  3470. if (mixed && (bctl->flags & allowed)) {
  3471. if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
  3472. !(bctl->flags & BTRFS_BALANCE_METADATA) ||
  3473. memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
  3474. btrfs_err(fs_info,
  3475. "balance: mixed groups data and metadata options must be the same");
  3476. ret = -EINVAL;
  3477. goto out;
  3478. }
  3479. }
  3480. /*
  3481. * rw_devices will not change at the moment, device add/delete/replace
  3482. * are exclusive
  3483. */
  3484. num_devices = fs_info->fs_devices->rw_devices;
  3485. /*
  3486. * SINGLE profile on-disk has no profile bit, but in-memory we have a
  3487. * special bit for it, to make it easier to distinguish. Thus we need
  3488. * to set it manually, or balance would refuse the profile.
  3489. */
  3490. allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
  3491. for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
  3492. if (num_devices >= btrfs_raid_array[i].devs_min)
  3493. allowed |= btrfs_raid_array[i].bg_flag;
  3494. if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
  3495. !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
  3496. !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) {
  3497. ret = -EINVAL;
  3498. goto out;
  3499. }
  3500. /*
  3501. * Allow to reduce metadata or system integrity only if force set for
  3502. * profiles with redundancy (copies, parity)
  3503. */
  3504. allowed = 0;
  3505. for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
  3506. if (btrfs_raid_array[i].ncopies >= 2 ||
  3507. btrfs_raid_array[i].tolerated_failures >= 1)
  3508. allowed |= btrfs_raid_array[i].bg_flag;
  3509. }
  3510. do {
  3511. seq = read_seqbegin(&fs_info->profiles_lock);
  3512. if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
  3513. (fs_info->avail_system_alloc_bits & allowed) &&
  3514. !(bctl->sys.target & allowed)) ||
  3515. ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
  3516. (fs_info->avail_metadata_alloc_bits & allowed) &&
  3517. !(bctl->meta.target & allowed)))
  3518. reducing_redundancy = true;
  3519. else
  3520. reducing_redundancy = false;
  3521. /* if we're not converting, the target field is uninitialized */
  3522. meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
  3523. bctl->meta.target : fs_info->avail_metadata_alloc_bits;
  3524. data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
  3525. bctl->data.target : fs_info->avail_data_alloc_bits;
  3526. } while (read_seqretry(&fs_info->profiles_lock, seq));
  3527. if (reducing_redundancy) {
  3528. if (bctl->flags & BTRFS_BALANCE_FORCE) {
  3529. btrfs_info(fs_info,
  3530. "balance: force reducing metadata redundancy");
  3531. } else {
  3532. btrfs_err(fs_info,
  3533. "balance: reduces metadata redundancy, use --force if you want this");
  3534. ret = -EINVAL;
  3535. goto out;
  3536. }
  3537. }
  3538. if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
  3539. btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
  3540. btrfs_warn(fs_info,
  3541. "balance: metadata profile %s has lower redundancy than data profile %s",
  3542. btrfs_bg_type_to_raid_name(meta_target),
  3543. btrfs_bg_type_to_raid_name(data_target));
  3544. }
  3545. if (fs_info->send_in_progress) {
  3546. btrfs_warn_rl(fs_info,
  3547. "cannot run balance while send operations are in progress (%d in progress)",
  3548. fs_info->send_in_progress);
  3549. ret = -EAGAIN;
  3550. goto out;
  3551. }
  3552. ret = insert_balance_item(fs_info, bctl);
  3553. if (ret && ret != -EEXIST)
  3554. goto out;
  3555. if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
  3556. BUG_ON(ret == -EEXIST);
  3557. BUG_ON(fs_info->balance_ctl);
  3558. spin_lock(&fs_info->balance_lock);
  3559. fs_info->balance_ctl = bctl;
  3560. spin_unlock(&fs_info->balance_lock);
  3561. } else {
  3562. BUG_ON(ret != -EEXIST);
  3563. spin_lock(&fs_info->balance_lock);
  3564. update_balance_args(bctl);
  3565. spin_unlock(&fs_info->balance_lock);
  3566. }
  3567. ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
  3568. set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
  3569. describe_balance_start_or_resume(fs_info);
  3570. mutex_unlock(&fs_info->balance_mutex);
  3571. ret = __btrfs_balance(fs_info);
  3572. mutex_lock(&fs_info->balance_mutex);
  3573. if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
  3574. btrfs_info(fs_info, "balance: paused");
  3575. /*
  3576. * Balance can be canceled by:
  3577. *
  3578. * - Regular cancel request
  3579. * Then ret == -ECANCELED and balance_cancel_req > 0
  3580. *
  3581. * - Fatal signal to "btrfs" process
  3582. * Either the signal caught by wait_reserve_ticket() and callers
  3583. * got -EINTR, or caught by btrfs_should_cancel_balance() and
  3584. * got -ECANCELED.
  3585. * Either way, in this case balance_cancel_req = 0, and
  3586. * ret == -EINTR or ret == -ECANCELED.
  3587. *
  3588. * So here we only check the return value to catch canceled balance.
  3589. */
  3590. else if (ret == -ECANCELED || ret == -EINTR)
  3591. btrfs_info(fs_info, "balance: canceled");
  3592. else
  3593. btrfs_info(fs_info, "balance: ended with status: %d", ret);
  3594. clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
  3595. if (bargs) {
  3596. memset(bargs, 0, sizeof(*bargs));
  3597. btrfs_update_ioctl_balance_args(fs_info, bargs);
  3598. }
  3599. if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
  3600. balance_need_close(fs_info)) {
  3601. reset_balance_state(fs_info);
  3602. btrfs_exclop_finish(fs_info);
  3603. }
  3604. wake_up(&fs_info->balance_wait_q);
  3605. return ret;
  3606. out:
  3607. if (bctl->flags & BTRFS_BALANCE_RESUME)
  3608. reset_balance_state(fs_info);
  3609. else
  3610. kfree(bctl);
  3611. btrfs_exclop_finish(fs_info);
  3612. return ret;
  3613. }
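/* Kthread entry point used to resume a previously interrupted balance. */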
  3614. static int balance_kthread(void *data)
  3615. {
  3616. struct btrfs_fs_info *fs_info = data;
  3617. int ret = 0;
  3618. sb_start_write(fs_info->sb);
  3619. mutex_lock(&fs_info->balance_mutex);
  3620. if (fs_info->balance_ctl)
  3621. ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
  3622. mutex_unlock(&fs_info->balance_mutex);
  3623. sb_end_write(fs_info->sb);
  3624. return ret;
  3625. }
  3626. int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
  3627. {
  3628. struct task_struct *tsk;
  3629. mutex_lock(&fs_info->balance_mutex);
  3630. if (!fs_info->balance_ctl) {
  3631. mutex_unlock(&fs_info->balance_mutex);
  3632. return 0;
  3633. }
  3634. mutex_unlock(&fs_info->balance_mutex);
  3635. if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
  3636. btrfs_info(fs_info, "balance: resume skipped");
  3637. return 0;
  3638. }
        /*
         * A ro->rw remount sequence should continue with the paused balance
         * regardless of who paused it (the system or, as of now, the user),
         * so set the resume flag.
         */
  3644. spin_lock(&fs_info->balance_lock);
  3645. fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
  3646. spin_unlock(&fs_info->balance_lock);
  3647. tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
  3648. return PTR_ERR_OR_ZERO(tsk);
  3649. }
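/*
 * Read the balance item (written by insert_balance_item()) back from the
 * tree root and recreate fs_info->balance_ctl with the RESUME flag set,
 * claiming the exclusive operation status for balance.
 */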
  3650. int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
  3651. {
  3652. struct btrfs_balance_control *bctl;
  3653. struct btrfs_balance_item *item;
  3654. struct btrfs_disk_balance_args disk_bargs;
  3655. struct btrfs_path *path;
  3656. struct extent_buffer *leaf;
  3657. struct btrfs_key key;
  3658. int ret;
  3659. path = btrfs_alloc_path();
  3660. if (!path)
  3661. return -ENOMEM;
  3662. key.objectid = BTRFS_BALANCE_OBJECTID;
  3663. key.type = BTRFS_TEMPORARY_ITEM_KEY;
  3664. key.offset = 0;
  3665. ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
  3666. if (ret < 0)
  3667. goto out;
  3668. if (ret > 0) { /* ret = -ENOENT; */
  3669. ret = 0;
  3670. goto out;
  3671. }
  3672. bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
  3673. if (!bctl) {
  3674. ret = -ENOMEM;
  3675. goto out;
  3676. }
  3677. leaf = path->nodes[0];
  3678. item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
  3679. bctl->flags = btrfs_balance_flags(leaf, item);
  3680. bctl->flags |= BTRFS_BALANCE_RESUME;
  3681. btrfs_balance_data(leaf, item, &disk_bargs);
  3682. btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
  3683. btrfs_balance_meta(leaf, item, &disk_bargs);
  3684. btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
  3685. btrfs_balance_sys(leaf, item, &disk_bargs);
  3686. btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
  3687. /*
  3688. * This should never happen, as the paused balance state is recovered
  3689. * during mount without any chance of other exclusive ops to collide.
  3690. *
  3691. * This gives the exclusive op status to balance and keeps in paused
  3692. * state until user intervention (cancel or umount). If the ownership
  3693. * cannot be assigned, show a message but do not fail. The balance
  3694. * is in a paused state and must have fs_info::balance_ctl properly
  3695. * set up.
  3696. */
  3697. if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
  3698. btrfs_warn(fs_info,
  3699. "balance: cannot set exclusive op status, resume manually");
  3700. btrfs_release_path(path);
  3701. mutex_lock(&fs_info->balance_mutex);
  3702. BUG_ON(fs_info->balance_ctl);
  3703. spin_lock(&fs_info->balance_lock);
  3704. fs_info->balance_ctl = bctl;
  3705. spin_unlock(&fs_info->balance_lock);
  3706. mutex_unlock(&fs_info->balance_mutex);
  3707. out:
  3708. btrfs_free_path(path);
  3709. return ret;
  3710. }
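/*
 * Ask a running balance to pause and wait until its loop notices the
 * request; returns -ENOTCONN when no balance is in progress.
 */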
  3711. int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
  3712. {
  3713. int ret = 0;
  3714. mutex_lock(&fs_info->balance_mutex);
  3715. if (!fs_info->balance_ctl) {
  3716. mutex_unlock(&fs_info->balance_mutex);
  3717. return -ENOTCONN;
  3718. }
  3719. if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
  3720. atomic_inc(&fs_info->balance_pause_req);
  3721. mutex_unlock(&fs_info->balance_mutex);
  3722. wait_event(fs_info->balance_wait_q,
  3723. !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
  3724. mutex_lock(&fs_info->balance_mutex);
  3725. /* we are good with balance_ctl ripped off from under us */
  3726. BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
  3727. atomic_dec(&fs_info->balance_pause_req);
  3728. } else {
  3729. ret = -ENOTCONN;
  3730. }
  3731. mutex_unlock(&fs_info->balance_mutex);
  3732. return ret;
  3733. }
  3734. int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
  3735. {
  3736. mutex_lock(&fs_info->balance_mutex);
  3737. if (!fs_info->balance_ctl) {
  3738. mutex_unlock(&fs_info->balance_mutex);
  3739. return -ENOTCONN;
  3740. }
  3741. /*
  3742. * A paused balance with the item stored on disk can be resumed at
  3743. * mount time if the mount is read-write. Otherwise it's still paused
  3744. * and we must not allow cancelling as it deletes the item.
  3745. */
  3746. if (sb_rdonly(fs_info->sb)) {
  3747. mutex_unlock(&fs_info->balance_mutex);
  3748. return -EROFS;
  3749. }
  3750. atomic_inc(&fs_info->balance_cancel_req);
        /*
         * If balance is running, just wait and return; the balance item is
         * deleted in btrfs_balance() in that case.
         */
  3755. if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
  3756. mutex_unlock(&fs_info->balance_mutex);
  3757. wait_event(fs_info->balance_wait_q,
  3758. !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
  3759. mutex_lock(&fs_info->balance_mutex);
  3760. } else {
  3761. mutex_unlock(&fs_info->balance_mutex);
                /*
                 * The lock was released to allow other waiters to continue;
                 * reexamine the status after reacquiring it.
                 */
  3766. mutex_lock(&fs_info->balance_mutex);
  3767. if (fs_info->balance_ctl) {
  3768. reset_balance_state(fs_info);
  3769. btrfs_exclop_finish(fs_info);
  3770. btrfs_info(fs_info, "balance: canceled");
  3771. }
  3772. }
  3773. BUG_ON(fs_info->balance_ctl ||
  3774. test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
  3775. atomic_dec(&fs_info->balance_cancel_req);
  3776. mutex_unlock(&fs_info->balance_mutex);
  3777. return 0;
  3778. }
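
/*
 * Worker that walks all ROOT_ITEMs in the tree of tree roots and makes
 * sure every subvolume uuid (and received_uuid, if set) has a matching
 * entry in the uuid tree. The (objectid, type, offset) key is advanced
 * manually at the end of each iteration so btrfs_search_forward() makes
 * progress even when items are skipped.
 */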
int btrfs_uuid_scan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	int ret = 0;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_root_item root_item;
	u32 item_size;
	struct btrfs_trans_handle *trans = NULL;
	bool closing = false;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;

	while (1) {
		if (btrfs_fs_closing(fs_info)) {
			closing = true;
			break;
		}
		ret = btrfs_search_forward(root, &key, path,
				BTRFS_OLDEST_GENERATION);
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}

		if (key.type != BTRFS_ROOT_ITEM_KEY ||
		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto skip;

		eb = path->nodes[0];
		slot = path->slots[0];
		item_size = btrfs_item_size_nr(eb, slot);
		if (item_size < sizeof(root_item))
			goto skip;

		read_extent_buffer(eb, &root_item,
				   btrfs_item_ptr_offset(eb, slot),
				   (int)sizeof(root_item));
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			btrfs_release_path(path);
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
			continue;
		} else {
			goto skip;
		}
update_tree:
		btrfs_release_path(path);
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					   ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
			ret = btrfs_uuid_tree_add(trans,
						  root_item.received_uuid,
						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					   ret);
				break;
			}
		}

skip:
		btrfs_release_path(path);
		if (trans) {
			ret = btrfs_end_transaction(trans);
			trans = NULL;
			if (ret)
				break;
		}

		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (ret)
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
	else if (!closing)
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}
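
/*
 * Create the uuid tree and kick off the scan kthread that populates it.
 * The transaction reserves 2 units, one for the new root node and one
 * for the root item, as noted in the comment below.
 */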
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * Shrinking a device means finding all of the device extents past the new
 * size, and then following the back refs to the chunks. The chunk relocation
 * code actually frees the device extent.
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;
	u64 start;

	new_size = round_down(new_size, fs_info->sectorsize);
	start = new_size;
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	mutex_lock(&fs_info->chunk_mutex);

	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;
		atomic64_sub(diff, &fs_info->free_chunk_space);
	}

	/*
	 * Once the device's size has been set to the new size, ensure all
	 * in-memory chunks are synced to disk so that the loop below sees them
	 * and relocates them accordingly.
	 */
	if (contains_pending_extent(device, &start, diff)) {
		mutex_unlock(&fs_info->chunk_mutex);
		ret = btrfs_commit_transaction(trans);
		if (ret)
			goto done;
	} else {
		mutex_unlock(&fs_info->chunk_mutex);
		btrfs_end_transaction(trans);
	}

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have,
		 * which could potentially end up with losing data's
		 * raid profile, so lets allocate an empty one in
		 * advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			failed++;
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);
	/* Clear all state bits beyond the shrunk device size */
	clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
			  CHUNK_STATE_MASK);

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
done:
	btrfs_free_path(path);
	if (ret) {
		mutex_lock(&fs_info->chunk_mutex);
		btrfs_device_set_total_bytes(device, old_size);
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
			device->fs_devices->total_rw_bytes += diff;
		atomic64_add(diff, &fs_info->free_chunk_space);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}
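
/*
 * Append a chunk to the in-memory copy of the superblock's
 * sys_chunk_array. Each entry is a packed pair: a struct btrfs_disk_key
 * immediately followed by the chunk item itself, which is why item_size
 * grows by sizeof(disk_key) before the array size is updated.
 */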
static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	mutex_unlock(&fs_info->chunk_mutex);

	return 0;
}

/*
 * Sort the devices in descending order by max_avail, total_avail.
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
		return;

	btrfs_set_fs_incompat(info, RAID1C34);
}

/*
 * Structure used internally for btrfs_alloc_chunk(). Wraps needed
 * parameters.
 */
struct alloc_chunk_ctl {
	u64 start;
	u64 type;
	/* Total number of stripes to allocate */
	int num_stripes;
	/* sub_stripes info for map */
	int sub_stripes;
	/* Stripes per device */
	int dev_stripes;
	/* Maximum number of devices to use */
	int devs_max;
	/* Minimum number of devices to use */
	int devs_min;
	/* ndevs has to be a multiple of this */
	int devs_increment;
	/* Number of copies */
	int ncopies;
	/* Number of stripes worth of bytes to store parity information */
	int nparity;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 dev_extent_min;
	u64 stripe_size;
	u64 chunk_size;
	int ndevs;
};
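
/*
 * Fill in the size limits for the regular allocation policy: 1G stripes
 * for data, 256M or 1G for metadata depending on filesystem size, 32M
 * for system chunks, and never more than 10% of the writable space for
 * a single chunk (div_factor(x, 1) computes x * 10%).
 */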
static void init_alloc_chunk_ctl_policy_regular(
				struct btrfs_fs_devices *fs_devices,
				struct alloc_chunk_ctl *ctl)
{
	u64 type = ctl->type;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		ctl->max_stripe_size = SZ_1G;
		ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* For larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			ctl->max_stripe_size = SZ_1G;
		else
			ctl->max_stripe_size = SZ_256M;
		ctl->max_chunk_size = ctl->max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ctl->max_stripe_size = SZ_32M;
		ctl->max_chunk_size = 2 * ctl->max_stripe_size;
		ctl->devs_max = min_t(int, ctl->devs_max,
				      BTRFS_MAX_DEVS_SYS_CHUNK);
	} else {
		BUG();
	}

	/* We don't want a chunk larger than 10% of writable space */
	ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
				  ctl->max_chunk_size);
	ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
}

static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
				 struct alloc_chunk_ctl *ctl)
{
	int index = btrfs_bg_flags_to_raid_index(ctl->type);

	ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
	ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
	ctl->devs_max = btrfs_raid_array[index].devs_max;
	if (!ctl->devs_max)
		ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
	ctl->devs_min = btrfs_raid_array[index].devs_min;
	ctl->devs_increment = btrfs_raid_array[index].devs_increment;
	ctl->ncopies = btrfs_raid_array[index].ncopies;
	ctl->nparity = btrfs_raid_array[index].nparity;
	ctl->ndevs = 0;

	switch (fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
		break;
	default:
		BUG();
	}
}
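
/*
 * First pass of the allocator: collect, for every writable device that
 * is part of the filesystem metadata and not a replace target, the
 * largest free extent (max_avail) and the total unallocated space
 * (total_avail), then sort the result with btrfs_cmp_device_info().
 */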
static int gather_device_info(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = fs_devices->fs_info;
	struct btrfs_device *device;
	u64 total_avail;
	u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
	int ret;
	int ndevs = 0;
	u64 max_avail;
	u64 dev_offset;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
					&device->dev_state) ||
		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail < ctl->dev_extent_min)
			continue;

		ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
					   &max_avail);
		if (ret && ret != -ENOSPC)
			return ret;

		if (ret == 0)
			max_avail = dev_extent_want;

		if (max_avail < ctl->dev_extent_min) {
			if (btrfs_test_opt(info, ENOSPC_DEBUG))
				btrfs_debug(info,
			"%s: devid %llu has no free space, have=%llu want=%llu",
					    __func__, device->devid, max_avail,
					    ctl->dev_extent_min);
			continue;
		}

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}

		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}
	ctl->ndevs = ndevs;

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	return 0;
}
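
/*
 * Illustrative example (not from the original source): with ndevs = 3,
 * RAID5 (ncopies = 1, nparity = 1) and a smallest max_avail of 10G,
 * stripe_size starts at 10G, num_stripes = 3 and data_stripes = 2, so
 * the 20G logical chunk exceeds the 10G max_chunk_size and stripe_size
 * is reduced to round_up(10G / 2, 16M) = 5G before the final
 * BTRFS_STRIPE_LEN alignment.
 */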
static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
				      struct btrfs_device_info *devices_info)
{
	/* Number of stripes that count for block group size */
	int data_stripes;

	/*
	 * The primary goal is to maximize the number of stripes, so use as
	 * many devices as possible, even if the stripes are not maximum sized.
	 *
	 * The DUP profile stores more than one stripe per device, the
	 * max_avail is the total size so we have to adjust.
	 */
	ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
				   ctl->dev_stripes);
	ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;

	/* This will have to be fixed for RAID1 and RAID10 over more drives */
	data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;

	/*
	 * Use the number of data stripes to figure out how big this chunk is
	 * really going to be in terms of logical address space, and compare
	 * that answer with the max chunk size. If it's higher, we try to
	 * reduce stripe_size.
	 */
	if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
		/*
		 * Reduce stripe_size, round it up to a 16MB boundary again and
		 * then use it, unless it ends up being even bigger than the
		 * previous value we had already.
		 */
		ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
							data_stripes), SZ_16M),
				       ctl->stripe_size);
	}

	/* Align to BTRFS_STRIPE_LEN */
	ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
	ctl->chunk_size = ctl->stripe_size * data_stripes;

	return 0;
}

static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
			      struct alloc_chunk_ctl *ctl,
			      struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = fs_devices->fs_info;

	/*
	 * Round down to number of usable stripes, devs_increment can be any
	 * number so we can't use round_down() that requires power of 2, while
	 * rounddown is safe.
	 */
	ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);

	if (ctl->ndevs < ctl->devs_min) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
			btrfs_debug(info,
	"%s: not enough devices with free space: have=%d minimum required=%d",
				    __func__, ctl->ndevs, ctl->devs_min);
		}
		return -ENOSPC;
	}

	ctl->ndevs = min(ctl->ndevs, ctl->devs_max);

	switch (fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return decide_stripe_size_regular(ctl, devices_info);
	default:
		BUG();
	}
}
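
/*
 * Second pass of the allocator: build the map_lookup describing the
 * physical stripe layout, insert it into the extent mapping tree and
 * create the block group. The chunk item itself is written to the
 * chunk tree later, in btrfs_finish_chunk_alloc().
 */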
static int create_chunk(struct btrfs_trans_handle *trans,
			struct alloc_chunk_ctl *ctl,
			struct btrfs_device_info *devices_info)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	u64 start = ctl->start;
	u64 type = ctl->type;
	int ret;
	int i;
	int j;

	map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
	if (!map)
		return -ENOMEM;
	map->num_stripes = ctl->num_stripes;

	for (i = 0; i < ctl->ndevs; ++i) {
		for (j = 0; j < ctl->dev_stripes; ++j) {
			int s = i * ctl->dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * ctl->stripe_size;
		}
	}
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = ctl->sub_stripes;

	trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		return -ENOMEM;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = ctl->chunk_size;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = ctl->stripe_size;

	em_tree = &info->mapping_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret) {
		write_unlock(&em_tree->lock);
		free_extent_map(em);
		return ret;
	}
	write_unlock(&em_tree->lock);

	ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
	if (ret)
		goto error_del_extent;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *dev = map->stripes[i].dev;

		btrfs_device_set_bytes_used(dev,
					    dev->bytes_used + ctl->stripe_size);
		if (list_empty(&dev->post_commit_list))
			list_add_tail(&dev->post_commit_list,
				      &trans->transaction->dev_update_list);
	}

	atomic64_sub(ctl->stripe_size * map->num_stripes,
		     &info->free_chunk_space);

	free_extent_map(em);
	check_raid56_incompat_flag(info, type);
	check_raid1c34_incompat_flag(info, type);

	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);

	return ret;
}

int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device_info *devices_info = NULL;
	struct alloc_chunk_ctl ctl;
	int ret;

	lockdep_assert_held(&info->chunk_mutex);

	if (!alloc_profile_is_valid(type, 0)) {
		ASSERT(0);
		return -EINVAL;
	}

	if (list_empty(&fs_devices->alloc_list)) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG))
			btrfs_debug(info, "%s: no writable device", __func__);
		return -ENOSPC;
	}

	if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
		btrfs_err(info, "invalid chunk type 0x%llx requested", type);
		ASSERT(0);
		return -EINVAL;
	}

	ctl.start = find_next_chunk(info);
	ctl.type = type;
	init_alloc_chunk_ctl(fs_devices, &ctl);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	ret = gather_device_info(fs_devices, &ctl, devices_info);
	if (ret < 0)
		goto out;

	ret = decide_stripe_size(fs_devices, &ctl, devices_info);
	if (ret < 0)
		goto out;

	ret = create_chunk(trans, &ctl, devices_info);

out:
	kfree(devices_info);
	return ret;
}

/*
 * Chunk allocation falls into two parts. The first part does the work that
 * makes the newly allocated chunk usable, but does not do any operation that
 * modifies the chunk tree. The second part does the work that requires
 * modifying the chunk tree. This division is important for the bootstrap
 * process of adding storage to a seed btrfs.
 */
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
					     dev_offset, stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 alloc_profile;
	int ret;

	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
	ret = btrfs_alloc_chunk(trans, alloc_profile);
	if (ret)
		return ret;

	alloc_profile = btrfs_system_alloc_profile(fs_info);
	ret = btrfs_alloc_chunk(trans, alloc_profile);
	return ret;
}

static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	const int index = btrfs_bg_flags_to_raid_index(map->type);

	return btrfs_raid_array[index].tolerated_failures;
}

int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em))
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (test_bit(BTRFS_DEV_STATE_MISSING,
					&map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
					&map->stripes[i].dev->dev_state)) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors, we
	 * cannot write the data into that chunk successfully, so set it
	 * readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_tree_free(struct extent_map_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->lock);
		em = lookup_extent_mapping(tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(tree, em);
		write_unlock(&tree->lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
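
/*
 * Return how many copies of the data at @logical can be read. RAID5
 * counts as 2 (the data itself plus reconstruction from parity) and
 * RAID6 returns num_stripes so callers can retry with each stripe
 * failed in turn; an ongoing device replace adds one extra readable
 * copy.
 */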
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret;

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		/*
		 * We could return errors for these cases, but that could get
		 * ugly and we'd probably do the same thing which is just not do
		 * anything else and exit, so return 1 so the callers don't try
		 * to use other copies.
		 */
		return 1;

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		/*
		 * There could be two corrupted data stripes, we need
		 * to loop retry in order to rebuild the correct data.
		 *
		 * Fail a stripe at a time on every retry except the
		 * stripe under reconstruction.
		 */
		ret = map->num_stripes;
	else
		ret = 1;
	free_extent_map(em);

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
	    fs_info->dev_replace.tgtdev)
		ret++;
	up_read(&fs_info->dev_replace.rwsem);

	return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	unsigned long len = fs_info->sectorsize;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = map->stripe_len * nr_data_stripes(map);
		free_extent_map(em);
	}
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		free_extent_map(em);
	}
	return ret;
}

static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	preferred_mirror = first + current->pid % num_stripes;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/*
	 * we couldn't find one that doesn't fail. Just return something
	 * and the io error handling code will clean up eventually
	 */
	return preferred_mirror;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	int i;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			/* Swap if parity is on a smaller index */
			if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
				swap(bbio->stripes[i], bbio->stripes[i + 1]);
				swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
				again = 1;
			}
		}
	}
}
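
/*
 * Allocate a btrfs_bio with its three trailing variable-length arrays
 * in a single allocation: the stripes, then the tgtdev_map indexes,
 * then the raid_map of logical addresses. The pointer fix-ups below
 * must match this layout. __GFP_NOFAIL means this cannot return NULL.
 */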
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	refcount_set(&bbio->refs, 1);

	bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
	bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);

	return bbio;
}

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!refcount_read(&bbio->refs));
	refcount_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (refcount_dec_and_test(&bbio->refs))
		kfree(bbio);
}

/* Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
 * Please note that discard won't be sent to the target device of a device
 * replace.
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 *length_ret,
					 struct btrfs_bio **bbio_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_bio *bbio;
	u64 length = *length_ret;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;

	/* Discard always returns a bbio. */
	ASSERT(bbio_ret);

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* we don't discard raid56 yet */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	offset = logical - em->start;
	length = min_t(u64, em->start + em->len - logical, length);
	*length_ret = length;

	stripe_len = map->stripe_len;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(offset, stripe_len);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_nr * stripe_len;

	stripe_nr_end = round_up(offset + length, map->stripe_len);
	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
	stripe_cnt = stripe_nr_end - stripe_nr;
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + length);
	/*
	 * after this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array
	 */
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;

		factor = map->num_stripes / sub_stripes;
		num_stripes = min_t(u64, map->num_stripes,
				    sub_stripes * stripe_cnt);
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= sub_stripes;
		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
					      &remaining_stripes);
		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
		last_stripe *= sub_stripes;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
				BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = map->num_stripes;
	} else {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
	}

	bbio = alloc_btrfs_bio(num_stripes, 0);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;

		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			bbio->stripes[i].length = stripes_per_dev *
				map->stripe_len;

			if (i / sub_stripes < remaining_stripes)
				bbio->stripes[i].length +=
					map->stripe_len;

			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off          end_off
			 */
			if (i < sub_stripes)
				bbio->stripes[i].length -=
					stripe_offset;

			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				bbio->stripes[i].length -=
					stripe_end_offset;

			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			bbio->stripes[i].length = length;
		}

		stripe_index++;
		if (stripe_index == map->num_stripes) {
			stripe_index = 0;
			stripe_nr++;
		}
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
out:
	free_extent_map(em);
	return ret;
}

/*
 * In dev-replace case, for repair case (that's the only case where the mirror
 * is selected explicitly when calling btrfs_map_block), blocks left of the
 * left cursor can also be read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned. This
 * can happen because btrfs_num_copies() returns one more in the dev-replace
 * case.
 */
static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 u64 srcdev_devid, int *mirror_num,
					 u64 *physical)
{
	struct btrfs_bio *bbio = NULL;
	int num_stripes;
	int index_srcdev = 0;
	int found = 0;
	u64 physical_of_found = 0;
	int i;
	int ret = 0;

	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &length, &bbio, 0, 0);
	if (ret) {
		ASSERT(bbio == NULL);
		return ret;
	}

	num_stripes = bbio->num_stripes;
	if (*mirror_num > num_stripes) {
		/*
		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
		 * that means that the requested area is not left of the left
		 * cursor
		 */
		btrfs_put_bbio(bbio);
		return -EIO;
	}

	/*
	 * process the rest of the function using the mirror_num of the source
	 * drive. Therefore look it up first. At the end, patch the device
	 * pointer to the one of the target drive.
	 */
	for (i = 0; i < num_stripes; i++) {
		if (bbio->stripes[i].dev->devid != srcdev_devid)
			continue;

		/*
		 * In case of DUP, in order to keep it simple, only add the
		 * mirror with the lowest physical address
		 */
		if (found &&
		    physical_of_found <= bbio->stripes[i].physical)
			continue;

		index_srcdev = i;
		found = 1;
		physical_of_found = bbio->stripes[i].physical;
	}

	btrfs_put_bbio(bbio);

	ASSERT(found);
	if (!found)
		return -EIO;

	*mirror_num = index_srcdev + 1;
	*physical = physical_of_found;
	return ret;
}

static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_bio **bbio_ret,
				      struct btrfs_dev_replace *dev_replace,
				      int *num_stripes_ret, int *max_errors_ret)
{
	struct btrfs_bio *bbio = *bbio_ret;
	u64 srcdev_devid = dev_replace->srcdev->devid;
	int tgtdev_indexes = 0;
	int num_stripes = *num_stripes_ret;
	int max_errors = *max_errors_ret;
	int i;

	if (op == BTRFS_MAP_WRITE) {
		int index_where_to_add;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk to
		 * the new disk takes place at run time while the filesystem is
		 * mounted writable, the regular write operations to the old
		 * disk have to be duplicated to go to the new disk as well.
		 *
		 * Note that device->missing is handled by the caller, and that
		 * the write to the old disk is already set up in the stripes
		 * array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can also
		 * be used to read data in case it is needed to repair a corrupt
		 * block elsewhere. This is possible if the requested area is
		 * left of the left cursor. In this area, the target drive is a
		 * full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it simple,
				 * only add the mirror with the lowest physical
				 * address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			struct btrfs_bio_stripe *tgtdev_stripe =
				bbio->stripes + num_stripes;

			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bbio->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bbio->tgtdev_map[index_srcdev] = num_stripes;

			tgtdev_indexes++;
			num_stripes++;
		}
	}

	*num_stripes_ret = num_stripes;
	*max_errors_ret = max_errors;
	bbio->num_tgtdevs = tgtdev_indexes;
	*bbio_ret = bbio;
}

static bool need_full_stripe(enum btrfs_map_op op)
{
	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
}

/*
 * btrfs_get_io_geometry - calculates the geometry of a particular
 *			   (address, len) tuple. This information is used to
 *			   calculate how big a particular bio can get before it
 *			   straddles a stripe.
 *
 * @fs_info - the filesystem
 * @logical - address that we want to figure out the geometry of
 * @len     - the length of IO we are going to perform, starting at @logical
 * @op      - type of operation - write or read
 * @io_geom - pointer used to return values
 *
 * Returns < 0 in case a chunk for the given logical address cannot be found,
 * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
 */
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
			  u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u64 raid56_full_stripe_start = (u64)-1;
	int data_stripes;
	int ret = 0;

	ASSERT(op != BTRFS_MAP_DISCARD);

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* Offset of this logical address in the chunk */
	offset = logical - em->start;
	/* Len of a stripe in a chunk */
	stripe_len = map->stripe_len;
	/* Stripe where this block falls in */
	stripe_nr = div64_u64(offset, stripe_len);
	/* Offset of stripe in the chunk */
	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info,
"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
			stripe_offset, offset, em->start, logical, stripe_len);
		ret = -EINVAL;
		goto out;
	}

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
	data_stripes = nr_data_stripes(map);

	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len = stripe_len - stripe_offset;

		/*
		 * In case of raid56, we need to know the stripe aligned start
		 */
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			unsigned long full_stripe_len = stripe_len * data_stripes;
			raid56_full_stripe_start = offset;

			/*
			 * Allow a write of a full stripe, but make sure we
			 * don't allow straddling of stripes
			 */
			raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
					full_stripe_len);
			raid56_full_stripe_start *= full_stripe_len;

			/*
			 * For writes to RAID[56], allow a full stripeset across
			 * all disks. For other RAID types and for RAID[56]
			 * reads, just allow a single stripe (on a single disk).
			 */
			if (op == BTRFS_MAP_WRITE) {
				max_len = stripe_len * data_stripes -
					  (offset - raid56_full_stripe_start);
			}
		}
		len = min_t(u64, em->len - offset, max_len);
	} else {
		len = em->len - offset;
	}

	io_geom->len = len;
	io_geom->offset = offset;
	io_geom->stripe_len = stripe_len;
	io_geom->stripe_nr = stripe_nr;
	io_geom->stripe_offset = stripe_offset;
	io_geom->raid56_stripe_offset = raid56_full_stripe_start;

out:
	/* once for us */
	free_extent_map(em);
	return ret;
}
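
/*
 * Core mapping routine: translate a logical address range into the set
 * of physical stripes (bbio) that an operation of type @op has to
 * touch. Writes get all mirrors, reads pick one via mirror_num or
 * find_live_mirror(), and RAID5/6 gets a raid_map describing the full
 * stripe when @need_raid_map is set.
 */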
  5193. static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
  5194. enum btrfs_map_op op,
  5195. u64 logical, u64 *length,
  5196. struct btrfs_bio **bbio_ret,
  5197. int mirror_num, int need_raid_map)
  5198. {
  5199. struct extent_map *em;
  5200. struct map_lookup *map;
  5201. u64 stripe_offset;
  5202. u64 stripe_nr;
  5203. u64 stripe_len;
  5204. u32 stripe_index;
  5205. int data_stripes;
  5206. int i;
  5207. int ret = 0;
  5208. int num_stripes;
  5209. int max_errors = 0;
  5210. int tgtdev_indexes = 0;
  5211. struct btrfs_bio *bbio = NULL;
  5212. struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
  5213. int dev_replace_is_ongoing = 0;
  5214. int num_alloc_stripes;
  5215. int patch_the_first_stripe_for_dev_replace = 0;
  5216. u64 physical_to_patch_in_first_stripe = 0;
  5217. u64 raid56_full_stripe_start = (u64)-1;
  5218. struct btrfs_io_geometry geom;
  5219. ASSERT(bbio_ret);
  5220. ASSERT(op != BTRFS_MAP_DISCARD);
  5221. ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
  5222. if (ret < 0)
  5223. return ret;
  5224. em = btrfs_get_chunk_map(fs_info, logical, *length);
  5225. ASSERT(!IS_ERR(em));
  5226. map = em->map_lookup;
  5227. *length = geom.len;
  5228. stripe_len = geom.stripe_len;
  5229. stripe_nr = geom.stripe_nr;
  5230. stripe_offset = geom.stripe_offset;
  5231. raid56_full_stripe_start = geom.raid56_stripe_offset;
  5232. data_stripes = nr_data_stripes(map);
  5233. down_read(&dev_replace->rwsem);
  5234. dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
  5235. /*
  5236. * Hold the semaphore for read during the whole operation, write is
  5237. * requested at commit time but must wait.
  5238. */
  5239. if (!dev_replace_is_ongoing)
  5240. up_read(&dev_replace->rwsem);
  5241. if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
  5242. !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
  5243. ret = get_extra_mirror_from_replace(fs_info, logical, *length,
  5244. dev_replace->srcdev->devid,
  5245. &mirror_num,
  5246. &physical_to_patch_in_first_stripe);
  5247. if (ret)
  5248. goto out;
  5249. else
  5250. patch_the_first_stripe_for_dev_replace = 1;
  5251. } else if (mirror_num > map->num_stripes) {
  5252. mirror_num = 0;
  5253. }
  5254. num_stripes = 1;
  5255. stripe_index = 0;
  5256. if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
  5257. stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
  5258. &stripe_index);
  5259. if (!need_full_stripe(op))
  5260. mirror_num = 1;
  5261. } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
  5262. if (need_full_stripe(op))
  5263. num_stripes = map->num_stripes;
  5264. else if (mirror_num)
  5265. stripe_index = mirror_num - 1;
  5266. else {
  5267. stripe_index = find_live_mirror(fs_info, map, 0,
  5268. dev_replace_is_ongoing);
  5269. mirror_num = stripe_index + 1;
  5270. }
  5271. } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
  5272. if (need_full_stripe(op)) {
  5273. num_stripes = map->num_stripes;
  5274. } else if (mirror_num) {
  5275. stripe_index = mirror_num - 1;
  5276. } else {
  5277. mirror_num = 1;
  5278. }
  5279. } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
  5280. u32 factor = map->num_stripes / map->sub_stripes;
  5281. stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
  5282. stripe_index *= map->sub_stripes;
		if (need_full_stripe(op))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;

			stripe_index = find_live_mirror(fs_info, map,
							stripe_index,
							dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
			/* Push stripe_nr back to the start of the full stripe */
			stripe_nr = div64_u64(raid56_full_stripe_start,
					      stripe_len * data_stripes);

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
						data_stripes, &stripe_index);
			if (mirror_num > 1)
				stripe_index = data_stripes + mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
				    &stripe_index);
			if (!need_full_stripe(op) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * After this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index
		 * is the number of our device in the stripe array.
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
		if (op == BTRFS_MAP_WRITE)
			num_alloc_stripes <<= 1;
		if (op == BTRFS_MAP_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical = map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;
		stripe_index++;
	}
	/* Build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
	    (need_full_stripe(op) || mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * data_stripes;
		for (i = 0; i < data_stripes; i++)
			bbio->raid_map[(i + rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i + rot) % num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i + rot + 1) % num_stripes] =
				RAID6_Q_STRIPE;

		sort_parity_stripes(bbio, num_stripes);
	}
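
	/*
	 * Worked example of the rotation above (illustrative, not from the
	 * original source): a 3-device RAID5 has num_stripes = 3 and
	 * data_stripes = 2. For stripe_nr = 1, rot = 1, so the data stripes
	 * fill raid_map slots (0+1) % 3 = 1 and (1+1) % 3 = 2, and parity
	 * takes slot (2+1) % 3 = 0: each successive full stripe rotates the
	 * parity device by one.
	 */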
	if (need_full_stripe(op))
		max_errors = btrfs_chunk_max_errors(map);

	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    need_full_stripe(op)) {
		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
					  &max_errors);
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;

	/*
	 * This is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror.
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	free_extent_map(em);
	return ret;
}

int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num)
{
	if (op == BTRFS_MAP_DISCARD)
		return __btrfs_map_block_for_discard(fs_info, logical,
						     length, bbio_ret);

	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
				 mirror_num, 0);
}

/* For scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
}
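
/*
 * Usage sketch for btrfs_map_block() (an illustration, not code from this
 * file; error handling trimmed): map one sector and walk the mirrors the
 * lookup returns.
 *
 *	u64 len = fs_info->sectorsize;
 *	struct btrfs_bio *bbio = NULL;
 *	int i;
 *
 *	if (!btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &len,
 *			     &bbio, 0)) {
 *		for (i = 0; i < bbio->num_stripes; i++)
 *			pr_info("copy %d: devid %llu physical %llu\n", i,
 *				bbio->stripes[i].dev->devid,
 *				bbio->stripes[i].physical);
 *		btrfs_put_bbio(bbio);
 *	}
 */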
static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
{
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
	bio_endio(bio);
	btrfs_put_bbio(bbio);
}

static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_status) {
		atomic_inc(&bbio->error);
		if (bio->bi_status == BLK_STS_IOERR ||
		    bio->bi_status == BLK_STS_TARGET) {
			struct btrfs_device *dev = btrfs_io_bio(bio)->device;

			ASSERT(dev->bdev);
			if (bio_op(bio) == REQ_OP_WRITE)
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
			else if (!(bio->bi_opf & REQ_RAHEAD))
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_READ_ERRS);
			if (bio->bi_opf & REQ_PREFLUSH)
				btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bbio->fs_info);

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/*
		 * Only send an error to the higher layers if it is beyond
		 * the tolerance of the btrfs bio.
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			bio->bi_status = BLK_STS_IOERR;
		} else {
			/*
			 * This bio is actually up to date, we didn't go
			 * over the max number of errors.
			 */
			bio->bi_status = BLK_STS_OK;
		}

		btrfs_end_bbio(bbio, bio);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
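
/*
 * Note on the tolerance check above (illustrative): for a RAID1 write the
 * bbio fans out to two stripes and btrfs_chunk_max_errors() yields
 * max_errors = 1, so losing one copy still completes the original bio with
 * BLK_STS_OK; only a second failure surfaces BLK_STS_IOERR.
 */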
static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
			      u64 physical, struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = bbio->fs_info;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->device = dev;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
	btrfs_debug_in_rcu(fs_info,
	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
		(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
		dev->devid, bio->bi_iter.bi_size);
	bio_set_dev(bio, dev->bdev);

	btrfs_bio_counter_inc_noblocked(fs_info);

	btrfsic_submit_bio(bio);
}

static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		if (atomic_read(&bbio->error) > bbio->max_errors)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_status = BLK_STS_OK;
		btrfs_end_bbio(bbio, bio);
	}
}
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
				&map_length, &bbio, mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
		/*
		 * In this case, map_length has been set to the length of
		 * a single stripe, not the whole write.
		 */
		if (bio_op(bio) == REQ_OP_WRITE) {
			ret = raid56_parity_write(fs_info, bio, bbio,
						  map_length);
		} else {
			ret = raid56_parity_recover(fs_info, bio, bbio,
						    map_length, mirror_num, 1);
		}

		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	if (map_length < length) {
		btrfs_crit(fs_info,
			   "mapping failed logical %llu bio len %llu len %llu",
			   logical, length, map_length);
		BUG();
	}

	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
						   &dev->dev_state) ||
		    (bio_op(first_bio) == REQ_OP_WRITE &&
		     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

		if (dev_nr < total_devs - 1)
			bio = btrfs_bio_clone(first_bio);
		else
			bio = first_bio;

		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
	}
	btrfs_bio_counter_dec(fs_info);
	return BLK_STS_OK;
}
/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 *
 * If @seed is true, traverse through the seed devices.
 */
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
				       u64 devid, u8 *uuid, u8 *fsid,
				       bool seed)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *seed_devs;

	if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (device->devid == devid &&
			    (!uuid || memcmp(device->uuid, uuid,
					     BTRFS_UUID_SIZE) == 0))
				return device;
		}
	}

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		if (!fsid ||
		    !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
			list_for_each_entry(device, &seed_devs->devices,
					    dev_list) {
				if (device->devid == devid &&
				    (!uuid || memcmp(device->uuid, uuid,
						     BTRFS_UUID_SIZE) == 0))
					return device;
			}
		}
	}

	return NULL;
}
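
/*
 * Usage sketch (an illustration; this exact pattern appears in callers
 * further below): look a device up by devid alone, traversing the seed
 * devices as well:
 *
 *	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
 *	if (!dev)
 *		return -ENODEV;
 */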
static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	unsigned int nofs_flag;

	/*
	 * We call this under the chunk_mutex, so we want to use NOFS for this
	 * allocation, however we don't want to change btrfs_alloc_device() to
	 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
	 * places.
	 */
	nofs_flag = memalloc_nofs_save();
	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}
/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device. If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device. If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error. Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device(fs_info);
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	return dev;
}
static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}

static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
{
	int index = btrfs_bg_flags_to_raid_index(type);
	int ncopies = btrfs_raid_array[index].ncopies;
	const int nparity = btrfs_raid_array[index].nparity;
	int data_stripes;

	if (nparity)
		data_stripes = num_stripes - nparity;
	else
		data_stripes = num_stripes / ncopies;

	return div_u64(chunk_len, data_stripes);
}
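
/*
 * Worked examples for calc_stripe_length() (illustrative numbers): a 1GiB
 * RAID1 chunk has ncopies = 2 and no parity, so data_stripes = 2 / 2 = 1
 * and each device stores the full 1GiB; a 1GiB RAID5 chunk over 3 devices
 * has nparity = 1, so data_stripes = 2 and each stripe is 512MiB.
 */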
static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	/*
	 * Only need to verify chunk item if we're reading from sys chunk array,
	 * as chunk item in tree block is already verified by tree-checker.
	 */
	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
		if (ret)
			return ret;
	}

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, logical, 1);
	read_unlock(&map_tree->lock);

	/* Already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	map->verified_stripes = 0;
	em->orig_block_len = calc_stripe_length(map->type, em->len,
						map->num_stripes);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
							devid, uuid, NULL, true);
		if (!map->stripes[i].dev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			free_extent_map(em);
			btrfs_report_missing_device(fs_info, devid, uuid, true);
			return -ENOENT;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(fs_info->fs_devices, devid,
						uuid);
			if (IS_ERR(map->stripes[i].dev)) {
				free_extent_map(em);
				btrfs_err(fs_info,
					"failed to init missing dev %llu: %ld",
					devid, PTR_ERR(map->stripes[i].dev));
				return PTR_ERR(map->stripes[i].dev);
			}
			btrfs_report_missing_device(fs_info, devid, uuid, false);
		}
		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
			&(map->stripes[i].dev->dev_state));
	}

	write_lock(&map_tree->lock);
	ret = add_extent_mapping(map_tree, em, 0);
	write_unlock(&map_tree->lock);
	if (ret < 0) {
		btrfs_err(fs_info,
			  "failed to add chunk map, start=%llu len=%llu: %d",
			  em->start, em->len, ret);
	}
	free_extent_map(em);

	return ret;
}
static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	lockdep_assert_held(&uuid_mutex);
	ASSERT(fsid);

	/* This will match only for multi-device seed fs */
	list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
			return fs_devices;

	fs_devices = find_fsid(fsid, NULL);
	if (!fs_devices) {
		if (!btrfs_test_opt(fs_info, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid, NULL);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = true;
		fs_devices->opened = 1;
		return fs_devices;
	}

	/*
	 * Upon first call for a seed fs fsid, just create a private copy of
	 * the respective fs_devices and anchor it at
	 * fs_info->fs_devices->seed_list.
	 */
	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		return ERR_PTR(ret);
	}

	if (!fs_devices->seeding) {
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
		return ERR_PTR(-EINVAL);
	}

	list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);

	return fs_devices;
}
static int read_one_dev(struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);

	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
				   fs_uuid, true);
	if (!device) {
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, true);
			return -ENOENT;
		}

		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				  "failed to add missing dev %llu: %ld",
				  devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
						    dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * This happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set the
			 * device's missing state bit here.
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
							&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
				&fs_info->free_chunk_space);
	}
	ret = 0;
	return ret;
}
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
	/*
	 * This will create extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the
	 * system array. set_extent_buffer_uptodate() call does not properly
	 * mark all its pages up-to-date when the page is larger: extent does
	 * not cover the whole page and consequently check_page_uptodate does
	 * not find all the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through mark_extent_buffer_dirty/writeback
	 * cycle, but sb spans only this function. Add an explicit
	 * SetPageUptodate call to silence the warning e.g. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;
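
	/*
	 * Layout being parsed below (sketch): sys_chunk_array is a packed
	 * sequence of (struct btrfs_disk_key, struct btrfs_chunk including
	 * its stripes) pairs, so each iteration advances first by the key
	 * size and then by btrfs_chunk_item_size(num_stripes).
	 */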
	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
			btrfs_err(fs_info,
			    "unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}

		chunk = (struct btrfs_chunk *)sb_array_offset;
		/*
		 * At least one btrfs_chunk with one stripe must be present,
		 * exact stripe count check comes afterwards.
		 */
		len = btrfs_chunk_item_size(1);
		if (cur_offset + len > array_size)
			goto out_short_read;

		num_stripes = btrfs_chunk_num_stripes(sb, chunk);
		if (!num_stripes) {
			btrfs_err(fs_info,
			"invalid number of stripes %u in sys_array at offset %u",
				  num_stripes, cur_offset);
			ret = -EIO;
			break;
		}

		type = btrfs_chunk_type(sb, chunk);
		if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
			btrfs_err(fs_info,
			"invalid chunk type %llu in sys_array at offset %u",
				  type, cur_offset);
			ret = -EIO;
			break;
		}

		len = btrfs_chunk_item_size(num_stripes);
		if (cur_offset + len > array_size)
			goto out_short_read;

		ret = read_one_chunk(&key, sb, chunk);
		if (ret)
			break;

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
			len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}
/*
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
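/*
 * Example (illustrative): a RAID1 chunk tolerates one disk barrier failure,
 * so a chunk with two missing stripes fails the check below and the
 * filesystem is not safe to mount read-write degraded.
 */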
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev)
{
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	u64 next_start = 0;
	bool ret = true;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, 0, (u64)-1);
	read_unlock(&map_tree->lock);
	/* No chunk at all? Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
				   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->lock);
		em = lookup_extent_mapping(map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->lock);
	}
out:
	return ret;
}
static void readahead_tree_node_children(struct extent_buffer *node)
{
	int i;
	const int nr_items = btrfs_header_nritems(node);

	for (i = 0; i < nr_items; i++) {
		u64 start;

		start = btrfs_node_blockptr(node, i);
		readahead_tree_block(node->fs_info, start);
	}
}
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;
	u64 last_ra_node = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS,
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);

	/*
	 * It is possible for mount and umount to race in such a way that
	 * we execute this code path, but open_fs_devices failed to clear
	 * total_rw_bytes. We certainly want it cleared before reading the
	 * device items, so clear it here.
	 */
	fs_info->fs_devices->total_rw_bytes = 0;

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		struct extent_buffer *node;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		/*
		 * The nodes on level 1 are not locked but we don't need to do
		 * that during mount time as nothing else can access the tree.
		 */
		node = path->nodes[1];
		if (node) {
			if (last_ra_node != node->start) {
				readahead_tree_node_children(node);
				last_ra_node = node->start;
			}
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;

			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			mutex_lock(&fs_info->chunk_mutex);
			ret = read_one_chunk(&found_key, leaf, chunk);
			mutex_unlock(&fs_info->chunk_mutex);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_err(fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;

	fs_devices->fs_info = fs_info;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list)
		device->fs_info = fs_info;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list)
			device->fs_info = fs_info;

		seed_devs->fs_info = fs_info;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}
static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
				 const struct btrfs_dev_stats_item *ptr,
				 int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			   ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
				      struct btrfs_dev_stats_item *ptr,
				      int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}
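
/*
 * Layout assumed by the two helpers above: struct btrfs_dev_stats_item
 * carries an array of __le64 counters, so value @index lives at
 * offsetof(struct btrfs_dev_stats_item, values) + index * sizeof(u64)
 * within the item.
 */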
static int btrfs_device_init_dev_stats(struct btrfs_device *device,
				       struct btrfs_path *path)
{
	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int item_size;
	int i, ret, slot;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;
	ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
	if (ret) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			btrfs_dev_stat_set(device, i, 0);
		device->dev_stats_valid = 1;
		btrfs_release_path(path);
		return ret < 0 ? ret : 0;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, slot);

	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
		if (item_size >= (1 + i) * sizeof(__le64))
			btrfs_dev_stat_set(device, i,
					   btrfs_dev_stats_value(eb, ptr, i));
		else
			btrfs_dev_stat_set(device, i, 0);
	}

	device->dev_stats_valid = 1;
	btrfs_dev_stat_print_on_load(device);
	btrfs_release_path(path);

	return 0;
}
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		ret = btrfs_device_init_dev_stats(device, path);
		if (ret)
			goto out;
	}
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
		list_for_each_entry(device, &seed_devs->devices, dev_list) {
			ret = btrfs_device_init_dev_stats(device, path);
			if (ret)
				goto out;
		}
	}
out:
	mutex_unlock(&fs_devices->device_list_mutex);

	btrfs_free_path(path);
	return ret;
}
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* Need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* Need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Called from commit_transaction. Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset.
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);
	return ret;
}
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* All values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
				true);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_set(dev, i, 0);
		}
		btrfs_info(fs_info, "device stats zeroed by %s (%d)",
			   current->comm, task_pid_nr(current));
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
/*
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}
/*
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const int index = btrfs_bg_flags_to_raid_index(flags);

	return btrfs_raid_array[index].ncopies;
}
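
/*
 * For example (values from btrfs_raid_array): DUP, RAID1 and RAID10 all
 * store two copies and return 2, while SINGLE and RAID0 return 1.
 */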
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
					"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* It's possible this device is a dummy for seed device */
	if (dev->disk_total_bytes == 0) {
		struct btrfs_fs_devices *devs;

		devs = list_first_entry(&fs_info->fs_devices->seed_list,
					struct btrfs_fs_devices, seed_list);
		dev = btrfs_find_device(devs, devid, NULL, NULL, false);
		if (!dev) {
			btrfs_err(fs_info, "failed to find seed devid %llu",
				  devid);
			ret = -EUCLEAN;
			goto out;
		}
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}
out:
	free_extent_map(em);
	return ret;
}
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}
/*
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}