block-group.c

// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}
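
/*
 * Return the allocation profile (in chunk format) for the given block group
 * type, folding in the profile bits currently available in the filesystem
 * and reducing them to what the present number of writeable devices allows.
 */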
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}
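
/*
 * Drop a reference on a block group. When the last reference is dropped the
 * structure is freed, after sanity checking that nothing is still pinned,
 * reserved or queued for discard.
 */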
void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		/*
		 * If the tree is not empty, someone is still holding a full
		 * stripe lock, which can only be released by its caller, and
		 * releasing it after this point would be a use-after-free.
		 * There is no better way to resolve this than to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;

	ASSERT(block_group->length != 0);

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->start)
		info->first_logical_byte = block_group->start;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->start)
			info->first_logical_byte = ret->start;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}
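
/*
 * Return the block group that follows @cache in the block group cache rbtree,
 * or NULL if @cache is the last one. Drops the caller's reference on @cache
 * and takes a reference on the returned group. If @cache was removed from the
 * rbtree in the meantime, fall back to a full search starting right after it.
 */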
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
		return cache;
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}
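
/*
 * Record that a NOCOW write is about to happen in the block group containing
 * @bytenr. Returns false if the block group does not exist or is read-only,
 * in which case the write must fall back to COW. A successful call must be
 * paired with btrfs_dec_nocow_writers(), which also drops the block group
 * reference taken here.
 */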
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* No put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}
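
/*
 * Undo a previous btrfs_inc_nocow_writers() once the NOCOW write has
 * finished, waking up any task waiting in btrfs_wait_nocow_writers().
 */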
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers()
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}
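
/*
 * Release one reservation count on the block group that contains @start and
 * wake up anybody waiting in btrfs_wait_block_group_reservations().
 */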
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}
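
/*
 * Return the caching control for @cache with an extra reference taken, or
 * NULL if the block group is not currently being cached. Pair with
 * btrfs_put_caching_control().
 */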
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	btrfs_put_caching_control(caching_ctl);
}

int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
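/*
 * Debug helper: remove every other chunk-sized range of free space from the
 * block group, so allocation paths can be exercised against an artificially
 * fragmented group.
 */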
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by btrfs_cache_block_group. Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't be
 * used yet, because their free space will only be released when the
 * transaction commits.
 */
u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(&info->excluded_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
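
/*
 * Slow path of block group caching: walk the extent tree (commit root) for
 * the range covered by the block group and add every gap between allocated
 * extents to the free space cache, periodically yielding the commit_root_sem
 * and waking up waiters as free space is found.
 */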
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
				block_group->start + block_group->length);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}
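
/*
 * Worker that populates the free space cache for one block group, either from
 * the free space tree (when available and trusted) or by scanning the extent
 * tree, then marks the group as cached (or errored) and wakes up waiters.
 */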
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	/*
	 * If we are in the transaction that populated the free space tree we
	 * can't actually cache from the free space tree as our commit root and
	 * real root are the same, so we could change the contents of the blocks
	 * while caching. Instead do the slow caching in this case, and after
	 * the transaction has committed we will be safe.
	 */
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
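
/*
 * Start caching free space for @cache. With the SPACE_CACHE mount option the
 * on-disk space cache is tried first; if that does not fully load the group
 * (or the option is not used) a caching_thread work item is queued to build
 * the cache in the background. If @load_cache_only is set, only the fast
 * on-disk cache load is attempted.
 */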
int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->start;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info. The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		btrfs_put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->length - cache->used;
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			btrfs_put_caching_control(caching_ctl);
			btrfs_free_excluded_extents(cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		btrfs_put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
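
/*
 * Clear the extended profile bits in @flags from the per-type available
 * allocation bits, used when the last block group of a given profile is
 * removed.
 */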
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *   in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}
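
/* Delete the block group item for @block_group from the extent tree. */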
static int remove_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret;

	root = fs_info->extent_root;
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;

	ret = btrfs_del_item(trans, root, path);
	return ret;
}
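
/*
 * Remove the (empty, read-only) block group starting at @group_start: drop
 * its free space cache and inode, unlink it from all lists and the rbtree,
 * update the space_info counters, delete its items from the free space and
 * extent trees and, unless the group is still frozen, remove its extent map.
 */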
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.type = 0;
	key.offset = block_group->start;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	/* Once for the block groups rbtree */
	btrfs_put_block_group(block_group);

	if (fs_info->first_logical_byte == block_group->start)
		fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = btrfs_get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		down_write(&fs_info->commit_root_sem);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
				    &fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					refcount_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		up_write(&fs_info->commit_root_sem);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			btrfs_put_caching_control(caching_ctl);
			btrfs_put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -= block_group->length;
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	/*
	 * Remove the free space for the block group from the free space tree
	 * and the block group's item from the extent tree before marking the
	 * block group as removed. This is to prevent races with tasks that
	 * freeze and unfreeze a block group, this task and another task
	 * allocating a new block group - the unfreeze task ends up removing
	 * the block group's extent map before the task calling this function
	 * deletes the block group item from the extent tree, allowing for
	 * another task to attempt to create another block group with the same
	 * item key (and failing with -EEXIST and a transaction abort).
	 */
	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	ret = remove_block_group_item(trans, path, block_group);
	if (ret < 0)
		goto out;

	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
	 * fs_info->block_group_cache_tree so no one can find it anymore and
	 * even if someone already got this block group before we removed it
	 * from the rbtree, they have already incremented block_group->frozen -
	 * if they didn't, for the trimming case they won't find any free space
	 * entries because we already removed them all when we called
	 * btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is needed to
	 * avoid races with trimming and scrub.
	 *
	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->frozen) == 0);
	spin_unlock(&block_group->lock);

	if (remove_em) {
		struct extent_map_tree *em_tree;

		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}

out:
	/* Once for the lookup reference */
	btrfs_put_block_group(block_group);
	if (remove_rsv)
		btrfs_delayed_refs_rsv_release(fs_info, 1);
	btrfs_free_path(path);
	return ret;
}
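
/*
 * Start a transaction with enough metadata units reserved to remove the block
 * group/chunk at @chunk_offset (see the reservation breakdown in the comment
 * below).
 */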
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	unsigned int num_items;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);
	ASSERT(em && em->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	map = em->map_lookup;
	num_items = 3 + map->num_stripes;
	free_extent_map(em);

	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
							   num_items);
}

/*
 * Mark block group @cache read-only, so later write won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by relocation routine,
 * not this function.
 */
static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->swap_extents) {
		ret = -ETXTBSY;
		goto out;
	}

	if (cache->ro) {
		cache->ro++;
		ret = 0;
		goto out;
	}

	num_bytes = cache->length - cache->reserved - cache->pinned -
		    cache->bytes_super - cache->used;

	/*
	 * Data never overcommits, even in mixed mode, so do just the straight
	 * check of left over space in how much we have allocated.
	 */
	if (force) {
		ret = 0;
	} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
		u64 sinfo_used = btrfs_space_info_used(sinfo, true);

		/*
		 * Here we make sure if we mark this bg RO, we still have enough
		 * free space as buffer.
		 */
		if (sinfo_used + num_bytes <= sinfo->total_bytes)
			ret = 0;
	} else {
		/*
		 * We overcommit metadata, so we need to do the
		 * btrfs_can_overcommit check here, and we need to pass in
		 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
		 * leeway to allow us to mark this block group as read only.
		 */
		if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
					 BTRFS_RESERVE_NO_FLUSH))
			ret = 0;
	}

	if (!ret) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
		btrfs_info(cache->fs_info,
			"unable to make block group %llu ro", cache->start);
		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
	}
	return ret;
}
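
/*
 * Clear any pinned extent ranges of @bg from the current and, if still
 * around, the previous transaction, so an unused block group can be removed
 * without racing with btrfs_finish_extent_commit(). Returns true on success.
 */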
static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	struct btrfs_transaction *prev_trans = NULL;
	const u64 start = bg->start;
	const u64 end = start + bg->length - 1;
	int ret;

	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->list.prev != &fs_info->trans_list) {
		prev_trans = list_last_entry(&trans->transaction->list,
					     struct btrfs_transaction, list);
		refcount_inc(&prev_trans->use_count);
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * Hold the unused_bg_unpin_mutex lock to avoid racing with
	 * btrfs_finish_extent_commit(). If we are at transaction N, another
	 * task might be running finish_extent_commit() for the previous
	 * transaction N - 1, and have seen a range belonging to the block
	 * group in pinned_extents before we were able to clear the whole block
	 * group range from pinned_extents. This means that task can lookup for
	 * the block group after we unpinned it from pinned_extents and removed
	 * it, leading to a BUG_ON() at unpin_extent_range().
	 */
	mutex_lock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans) {
		ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
					EXTENT_DIRTY);
		if (ret)
			goto out;
	}

	ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
				EXTENT_DIRTY);
out:
	mutex_unlock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans)
		btrfs_put_transaction(prev_trans);

	return ret == 0;
}

  1157. /*
  1158. * Process the unused_bgs list and remove any that don't have any allocated
  1159. * space inside of them.
  1160. */
  1161. void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
  1162. {
  1163. struct btrfs_block_group *block_group;
  1164. struct btrfs_space_info *space_info;
  1165. struct btrfs_trans_handle *trans;
  1166. const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
  1167. int ret = 0;
  1168. if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
  1169. return;
  1170. spin_lock(&fs_info->unused_bgs_lock);
  1171. while (!list_empty(&fs_info->unused_bgs)) {
  1172. int trimming;
  1173. block_group = list_first_entry(&fs_info->unused_bgs,
  1174. struct btrfs_block_group,
  1175. bg_list);
  1176. list_del_init(&block_group->bg_list);
  1177. space_info = block_group->space_info;
  1178. if (ret || btrfs_mixed_space_info(space_info)) {
  1179. btrfs_put_block_group(block_group);
  1180. continue;
  1181. }
  1182. spin_unlock(&fs_info->unused_bgs_lock);
  1183. btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
  1184. mutex_lock(&fs_info->delete_unused_bgs_mutex);
  1185. /* Don't want to race with allocators so take the groups_sem */
  1186. down_write(&space_info->groups_sem);
  1187. /*
  1188. * Async discard moves the final block group discard to be prior
  1189. * to the unused_bgs code path. Therefore, if it's not fully
  1190. * trimmed, punt it back to the async discard lists.
  1191. */
  1192. if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
  1193. !btrfs_is_free_space_trimmed(block_group)) {
  1194. trace_btrfs_skip_unused_block_group(block_group);
  1195. up_write(&space_info->groups_sem);
  1196. /* Requeue if we failed because of async discard */
  1197. btrfs_discard_queue_work(&fs_info->discard_ctl,
  1198. block_group);
  1199. goto next;
  1200. }
  1201. spin_lock(&block_group->lock);
  1202. if (block_group->reserved || block_group->pinned ||
  1203. block_group->used || block_group->ro ||
  1204. list_is_singular(&block_group->list)) {
  1205. /*
  1206. * We want to bail if we made new allocations or have
  1207. * outstanding allocations in this block group. We do
  1208. * the ro check in case balance is currently acting on
  1209. * this block group.
  1210. */
  1211. trace_btrfs_skip_unused_block_group(block_group);
  1212. spin_unlock(&block_group->lock);
  1213. up_write(&space_info->groups_sem);
  1214. goto next;
  1215. }
  1216. spin_unlock(&block_group->lock);
  1217. /* We don't want to force the issue, only flip if it's ok. */
  1218. ret = inc_block_group_ro(block_group, 0);
  1219. up_write(&space_info->groups_sem);
  1220. if (ret < 0) {
  1221. ret = 0;
  1222. goto next;
  1223. }
  1224. /*
  1225. * Want to do this before we do anything else so we can recover
  1226. * properly if we fail to join the transaction.
  1227. */
  1228. trans = btrfs_start_trans_remove_block_group(fs_info,
  1229. block_group->start);
  1230. if (IS_ERR(trans)) {
  1231. btrfs_dec_block_group_ro(block_group);
  1232. ret = PTR_ERR(trans);
  1233. goto next;
  1234. }
  1235. /*
  1236. * We could have pending pinned extents for this block group,
  1237. * just delete them, we don't care about them anymore.
  1238. */
  1239. if (!clean_pinned_extents(trans, block_group)) {
  1240. btrfs_dec_block_group_ro(block_group);
  1241. goto end_trans;
  1242. }
  1243. /*
  1244. * At this point, the block_group is read only and should fail
  1245. * new allocations. However, btrfs_finish_extent_commit() can
  1246. * cause this block_group to be placed back on the discard
  1247. * lists because now the block_group isn't fully discarded.
  1248. * Bail here and try again later after discarding everything.
  1249. */
  1250. spin_lock(&fs_info->discard_ctl.lock);
  1251. if (!list_empty(&block_group->discard_list)) {
  1252. spin_unlock(&fs_info->discard_ctl.lock);
  1253. btrfs_dec_block_group_ro(block_group);
  1254. btrfs_discard_queue_work(&fs_info->discard_ctl,
  1255. block_group);
  1256. goto end_trans;
  1257. }
  1258. spin_unlock(&fs_info->discard_ctl.lock);
  1259. /* Reset pinned so btrfs_put_block_group doesn't complain */
  1260. spin_lock(&space_info->lock);
  1261. spin_lock(&block_group->lock);
  1262. btrfs_space_info_update_bytes_pinned(fs_info, space_info,
  1263. -block_group->pinned);
  1264. space_info->bytes_readonly += block_group->pinned;
  1265. __btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned);
  1266. block_group->pinned = 0;
  1267. spin_unlock(&block_group->lock);
  1268. spin_unlock(&space_info->lock);
  1269. /*
  1270. * The normal path here is an unused block group is passed here,
  1271. * then trimming is handled in the transaction commit path.
  1272. * Async discard interposes before this to do the trimming
  1273. * before coming down the unused block group path as trimming
  1274. * will no longer be done later in the transaction commit path.
  1275. */
  1276. if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
  1277. goto flip_async;
  1278. /* DISCARD can flip during remount */
  1279. trimming = btrfs_test_opt(fs_info, DISCARD_SYNC);
  1280. /* Implicit trim during transaction commit. */
  1281. if (trimming)
  1282. btrfs_freeze_block_group(block_group);
  1283. /*
  1284. * Btrfs_remove_chunk will abort the transaction if things go
  1285. * horribly wrong.
  1286. */
  1287. ret = btrfs_remove_chunk(trans, block_group->start);
  1288. if (ret) {
  1289. if (trimming)
  1290. btrfs_unfreeze_block_group(block_group);
  1291. goto end_trans;
  1292. }
  1293. /*
  1294. * If we're not mounted with -odiscard, we can just forget
  1295. * about this block group. Otherwise we'll need to wait
  1296. * until transaction commit to do the actual discard.
  1297. */
  1298. if (trimming) {
  1299. spin_lock(&fs_info->unused_bgs_lock);
  1300. /*
  1301. * A concurrent scrub might have added us to the list
  1302. * fs_info->unused_bgs, so use a list_move operation
  1303. * to add the block group to the deleted_bgs list.
  1304. */
  1305. list_move(&block_group->bg_list,
  1306. &trans->transaction->deleted_bgs);
  1307. spin_unlock(&fs_info->unused_bgs_lock);
  1308. btrfs_get_block_group(block_group);
  1309. }
  1310. end_trans:
  1311. btrfs_end_transaction(trans);
  1312. next:
  1313. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  1314. btrfs_put_block_group(block_group);
  1315. spin_lock(&fs_info->unused_bgs_lock);
  1316. }
  1317. spin_unlock(&fs_info->unused_bgs_lock);
  1318. return;
  1319. flip_async:
  1320. btrfs_end_transaction(trans);
  1321. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  1322. btrfs_put_block_group(block_group);
  1323. btrfs_discard_punt_unused_bgs_list(fs_info);
  1324. }
  1325. void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
  1326. {
  1327. struct btrfs_fs_info *fs_info = bg->fs_info;
  1328. spin_lock(&fs_info->unused_bgs_lock);
  1329. if (list_empty(&bg->bg_list)) {
  1330. btrfs_get_block_group(bg);
  1331. trace_btrfs_add_unused_block_group(bg);
  1332. list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
  1333. }
  1334. spin_unlock(&fs_info->unused_bgs_lock);
  1335. }
  1336. static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
  1337. struct btrfs_path *path)
  1338. {
  1339. struct extent_map_tree *em_tree;
  1340. struct extent_map *em;
  1341. struct btrfs_block_group_item bg;
  1342. struct extent_buffer *leaf;
  1343. int slot;
  1344. u64 flags;
  1345. int ret = 0;
  1346. slot = path->slots[0];
  1347. leaf = path->nodes[0];
  1348. em_tree = &fs_info->mapping_tree;
  1349. read_lock(&em_tree->lock);
  1350. em = lookup_extent_mapping(em_tree, key->objectid, key->offset);
  1351. read_unlock(&em_tree->lock);
  1352. if (!em) {
  1353. btrfs_err(fs_info,
  1354. "logical %llu len %llu found bg but no related chunk",
  1355. key->objectid, key->offset);
  1356. return -ENOENT;
  1357. }
  1358. if (em->start != key->objectid || em->len != key->offset) {
  1359. btrfs_err(fs_info,
  1360. "block group %llu len %llu mismatch with chunk %llu len %llu",
  1361. key->objectid, key->offset, em->start, em->len);
  1362. ret = -EUCLEAN;
  1363. goto out_free_em;
  1364. }
  1365. read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot),
  1366. sizeof(bg));
  1367. flags = btrfs_stack_block_group_flags(&bg) &
  1368. BTRFS_BLOCK_GROUP_TYPE_MASK;
  1369. if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
  1370. btrfs_err(fs_info,
  1371. "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
  1372. key->objectid, key->offset, flags,
  1373. (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type));
  1374. ret = -EUCLEAN;
  1375. }
  1376. out_free_em:
  1377. free_extent_map(em);
  1378. return ret;
  1379. }
  1380. static int find_first_block_group(struct btrfs_fs_info *fs_info,
  1381. struct btrfs_path *path,
  1382. struct btrfs_key *key)
  1383. {
  1384. struct btrfs_root *root = fs_info->extent_root;
  1385. int ret;
  1386. struct btrfs_key found_key;
  1387. struct extent_buffer *leaf;
  1388. int slot;
  1389. ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
  1390. if (ret < 0)
  1391. return ret;
  1392. while (1) {
  1393. slot = path->slots[0];
  1394. leaf = path->nodes[0];
  1395. if (slot >= btrfs_header_nritems(leaf)) {
  1396. ret = btrfs_next_leaf(root, path);
  1397. if (ret == 0)
  1398. continue;
  1399. if (ret < 0)
  1400. goto out;
  1401. break;
  1402. }
  1403. btrfs_item_key_to_cpu(leaf, &found_key, slot);
  1404. if (found_key.objectid >= key->objectid &&
  1405. found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
  1406. ret = read_bg_from_eb(fs_info, &found_key, path);
  1407. break;
  1408. }
  1409. path->slots[0]++;
  1410. }
  1411. out:
  1412. return ret;
  1413. }
  1414. static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
  1415. {
  1416. u64 extra_flags = chunk_to_extended(flags) &
  1417. BTRFS_EXTENDED_PROFILE_MASK;
  1418. write_seqlock(&fs_info->profiles_lock);
  1419. if (flags & BTRFS_BLOCK_GROUP_DATA)
  1420. fs_info->avail_data_alloc_bits |= extra_flags;
  1421. if (flags & BTRFS_BLOCK_GROUP_METADATA)
  1422. fs_info->avail_metadata_alloc_bits |= extra_flags;
  1423. if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
  1424. fs_info->avail_system_alloc_bits |= extra_flags;
  1425. write_sequnlock(&fs_info->profiles_lock);
  1426. }
  1427. /**
  1428. * btrfs_rmap_block - Map a physical disk address to a list of logical addresses
  1429. * @chunk_start: logical address of block group
  1430. * @physical: physical address to map to logical addresses
  1431. * @logical: return array of logical addresses which map to @physical
  1432. * @naddrs: length of @logical
  1433. * @stripe_len: size of IO stripe for the given block group
  1434. *
  1435. * Maps a particular @physical disk address to a list of @logical addresses.
  1436. * Used primarily to exclude those portions of a block group that contain super
  1437. * block copies.
  1438. */
  1439. EXPORT_FOR_TESTS
  1440. int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
  1441. u64 physical, u64 **logical, int *naddrs, int *stripe_len)
  1442. {
  1443. struct extent_map *em;
  1444. struct map_lookup *map;
  1445. u64 *buf;
  1446. u64 bytenr;
  1447. u64 data_stripe_length;
  1448. u64 io_stripe_size;
  1449. int i, nr = 0;
  1450. int ret = 0;
  1451. em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
  1452. if (IS_ERR(em))
  1453. return -EIO;
  1454. map = em->map_lookup;
  1455. data_stripe_length = em->orig_block_len;
  1456. io_stripe_size = map->stripe_len;
  1457. /* For RAID5/6 adjust to a full IO stripe length */
  1458. if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
  1459. io_stripe_size = map->stripe_len * nr_data_stripes(map);
  1460. buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
  1461. if (!buf) {
  1462. ret = -ENOMEM;
  1463. goto out;
  1464. }
  1465. for (i = 0; i < map->num_stripes; i++) {
  1466. bool already_inserted = false;
  1467. u64 stripe_nr;
  1468. int j;
  1469. if (!in_range(physical, map->stripes[i].physical,
  1470. data_stripe_length))
  1471. continue;
  1472. stripe_nr = physical - map->stripes[i].physical;
  1473. stripe_nr = div64_u64(stripe_nr, map->stripe_len);
  1474. if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
  1475. stripe_nr = stripe_nr * map->num_stripes + i;
  1476. stripe_nr = div_u64(stripe_nr, map->sub_stripes);
  1477. } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
  1478. stripe_nr = stripe_nr * map->num_stripes + i;
  1479. }
  1480. /*
  1481. * The remaining case would be for RAID56, multiply by
  1482. * nr_data_stripes(). Alternatively, just use rmap_len below
  1483. * instead of map->stripe_len
  1484. */
  1485. bytenr = chunk_start + stripe_nr * io_stripe_size;
  1486. /* Ensure we don't add duplicate addresses */
  1487. for (j = 0; j < nr; j++) {
  1488. if (buf[j] == bytenr) {
  1489. already_inserted = true;
  1490. break;
  1491. }
  1492. }
  1493. if (!already_inserted)
  1494. buf[nr++] = bytenr;
  1495. }
  1496. *logical = buf;
  1497. *naddrs = nr;
  1498. *stripe_len = io_stripe_size;
  1499. out:
  1500. free_extent_map(em);
  1501. return ret;
  1502. }
  1503. static int exclude_super_stripes(struct btrfs_block_group *cache)
  1504. {
  1505. struct btrfs_fs_info *fs_info = cache->fs_info;
  1506. u64 bytenr;
  1507. u64 *logical;
  1508. int stripe_len;
  1509. int i, nr, ret;
  1510. if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
  1511. stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
  1512. cache->bytes_super += stripe_len;
  1513. ret = btrfs_add_excluded_extent(fs_info, cache->start,
  1514. stripe_len);
  1515. if (ret)
  1516. return ret;
  1517. }
  1518. for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
  1519. bytenr = btrfs_sb_offset(i);
  1520. ret = btrfs_rmap_block(fs_info, cache->start,
  1521. bytenr, &logical, &nr, &stripe_len);
  1522. if (ret)
  1523. return ret;
  1524. while (nr--) {
  1525. u64 len = min_t(u64, stripe_len,
  1526. cache->start + cache->length - logical[nr]);
  1527. cache->bytes_super += len;
  1528. ret = btrfs_add_excluded_extent(fs_info, logical[nr],
  1529. len);
  1530. if (ret) {
  1531. kfree(logical);
  1532. return ret;
  1533. }
  1534. }
  1535. kfree(logical);
  1536. }
  1537. return 0;
  1538. }
  1539. static void link_block_group(struct btrfs_block_group *cache)
  1540. {
  1541. struct btrfs_space_info *space_info = cache->space_info;
  1542. int index = btrfs_bg_flags_to_raid_index(cache->flags);
  1543. down_write(&space_info->groups_sem);
  1544. list_add_tail(&cache->list, &space_info->block_groups[index]);
  1545. up_write(&space_info->groups_sem);
  1546. }
  1547. static struct btrfs_block_group *btrfs_create_block_group_cache(
  1548. struct btrfs_fs_info *fs_info, u64 start)
  1549. {
  1550. struct btrfs_block_group *cache;
  1551. cache = kzalloc(sizeof(*cache), GFP_NOFS);
  1552. if (!cache)
  1553. return NULL;
  1554. cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
  1555. GFP_NOFS);
  1556. if (!cache->free_space_ctl) {
  1557. kfree(cache);
  1558. return NULL;
  1559. }
  1560. cache->start = start;
  1561. cache->fs_info = fs_info;
  1562. cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
  1563. cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
  1564. refcount_set(&cache->refs, 1);
  1565. spin_lock_init(&cache->lock);
  1566. init_rwsem(&cache->data_rwsem);
  1567. INIT_LIST_HEAD(&cache->list);
  1568. INIT_LIST_HEAD(&cache->cluster_list);
  1569. INIT_LIST_HEAD(&cache->bg_list);
  1570. INIT_LIST_HEAD(&cache->ro_list);
  1571. INIT_LIST_HEAD(&cache->discard_list);
  1572. INIT_LIST_HEAD(&cache->dirty_list);
  1573. INIT_LIST_HEAD(&cache->io_list);
  1574. btrfs_init_free_space_ctl(cache);
  1575. atomic_set(&cache->frozen, 0);
  1576. mutex_init(&cache->free_space_lock);
  1577. btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
  1578. return cache;
  1579. }
  1580. /*
  1581. * Iterate all chunks and verify that each of them has the corresponding block
  1582. * group
  1583. */
  1584. static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
  1585. {
  1586. struct extent_map_tree *map_tree = &fs_info->mapping_tree;
  1587. struct extent_map *em;
  1588. struct btrfs_block_group *bg;
  1589. u64 start = 0;
  1590. int ret = 0;
  1591. while (1) {
  1592. read_lock(&map_tree->lock);
  1593. /*
  1594. * lookup_extent_mapping will return the first extent map
  1595. * intersecting the range, so setting @len to 1 is enough to
  1596. * get the first chunk.
  1597. */
  1598. em = lookup_extent_mapping(map_tree, start, 1);
  1599. read_unlock(&map_tree->lock);
  1600. if (!em)
  1601. break;
  1602. bg = btrfs_lookup_block_group(fs_info, em->start);
  1603. if (!bg) {
  1604. btrfs_err(fs_info,
  1605. "chunk start=%llu len=%llu doesn't have corresponding block group",
  1606. em->start, em->len);
  1607. ret = -EUCLEAN;
  1608. free_extent_map(em);
  1609. break;
  1610. }
  1611. if (bg->start != em->start || bg->length != em->len ||
  1612. (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
  1613. (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
  1614. btrfs_err(fs_info,
  1615. "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
  1616. em->start, em->len,
  1617. em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
  1618. bg->start, bg->length,
  1619. bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
  1620. ret = -EUCLEAN;
  1621. free_extent_map(em);
  1622. btrfs_put_block_group(bg);
  1623. break;
  1624. }
  1625. start = em->start + em->len;
  1626. free_extent_map(em);
  1627. btrfs_put_block_group(bg);
  1628. }
  1629. return ret;
  1630. }
  1631. static void read_block_group_item(struct btrfs_block_group *cache,
  1632. struct btrfs_path *path,
  1633. const struct btrfs_key *key)
  1634. {
  1635. struct extent_buffer *leaf = path->nodes[0];
  1636. struct btrfs_block_group_item bgi;
  1637. int slot = path->slots[0];
  1638. cache->length = key->offset;
  1639. read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
  1640. sizeof(bgi));
  1641. cache->used = btrfs_stack_block_group_used(&bgi);
  1642. cache->flags = btrfs_stack_block_group_flags(&bgi);
  1643. }
  1644. static int read_one_block_group(struct btrfs_fs_info *info,
  1645. struct btrfs_path *path,
  1646. const struct btrfs_key *key,
  1647. int need_clear)
  1648. {
  1649. struct btrfs_block_group *cache;
  1650. struct btrfs_space_info *space_info;
  1651. const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
  1652. int ret;
  1653. ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
  1654. cache = btrfs_create_block_group_cache(info, key->objectid);
  1655. if (!cache)
  1656. return -ENOMEM;
  1657. read_block_group_item(cache, path, key);
  1658. set_free_space_tree_thresholds(cache);
  1659. if (need_clear) {
  1660. /*
  1661. * When we mount with old space cache, we need to
  1662. * set BTRFS_DC_CLEAR and set dirty flag.
  1663. *
  1664. * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
  1665. * truncate the old free space cache inode and
  1666. * setup a new one.
  1667. * b) Setting 'dirty flag' makes sure that we flush
  1668. * the new space cache info onto disk.
  1669. */
  1670. if (btrfs_test_opt(info, SPACE_CACHE))
  1671. cache->disk_cache_state = BTRFS_DC_CLEAR;
  1672. }
  1673. if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
  1674. (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
  1675. btrfs_err(info,
  1676. "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
  1677. cache->start);
  1678. ret = -EINVAL;
  1679. goto error;
  1680. }
  1681. /*
  1682. * We need to exclude the super stripes now so that the space info has
  1683. * super bytes accounted for, otherwise we'll think we have more space
  1684. * than we actually do.
  1685. */
  1686. ret = exclude_super_stripes(cache);
  1687. if (ret) {
  1688. /* We may have excluded something, so call this just in case. */
  1689. btrfs_free_excluded_extents(cache);
  1690. goto error;
  1691. }
  1692. /*
  1693. * Check for two cases, either we are full, and therefore don't need
  1694. * to bother with the caching work since we won't find any space, or we
  1695. * are empty, and we can just add all the space in and be done with it.
  1696. * This saves us _a_lot_ of time, particularly in the full case.
  1697. */
  1698. if (cache->length == cache->used) {
  1699. cache->last_byte_to_unpin = (u64)-1;
  1700. cache->cached = BTRFS_CACHE_FINISHED;
  1701. btrfs_free_excluded_extents(cache);
  1702. } else if (cache->used == 0) {
  1703. cache->last_byte_to_unpin = (u64)-1;
  1704. cache->cached = BTRFS_CACHE_FINISHED;
  1705. add_new_free_space(cache, cache->start,
  1706. cache->start + cache->length);
  1707. btrfs_free_excluded_extents(cache);
  1708. }
  1709. ret = btrfs_add_block_group_cache(info, cache);
  1710. if (ret) {
  1711. btrfs_remove_free_space_cache(cache);
  1712. goto error;
  1713. }
  1714. trace_btrfs_add_block_group(info, cache, 0);
  1715. btrfs_update_space_info(info, cache->flags, cache->length,
  1716. cache->used, cache->bytes_super, &space_info);
  1717. cache->space_info = space_info;
  1718. link_block_group(cache);
  1719. set_avail_alloc_bits(info, cache->flags);
  1720. if (btrfs_chunk_readonly(info, cache->start)) {
  1721. inc_block_group_ro(cache, 1);
  1722. } else if (cache->used == 0) {
  1723. ASSERT(list_empty(&cache->bg_list));
  1724. if (btrfs_test_opt(info, DISCARD_ASYNC))
  1725. btrfs_discard_queue_work(&info->discard_ctl, cache);
  1726. else
  1727. btrfs_mark_bg_unused(cache);
  1728. }
  1729. return 0;
  1730. error:
  1731. btrfs_put_block_group(cache);
  1732. return ret;
  1733. }
  1734. int btrfs_read_block_groups(struct btrfs_fs_info *info)
  1735. {
  1736. struct btrfs_path *path;
  1737. int ret;
  1738. struct btrfs_block_group *cache;
  1739. struct btrfs_space_info *space_info;
  1740. struct btrfs_key key;
  1741. int need_clear = 0;
  1742. u64 cache_gen;
  1743. key.objectid = 0;
  1744. key.offset = 0;
  1745. key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
  1746. path = btrfs_alloc_path();
  1747. if (!path)
  1748. return -ENOMEM;
  1749. cache_gen = btrfs_super_cache_generation(info->super_copy);
  1750. if (btrfs_test_opt(info, SPACE_CACHE) &&
  1751. btrfs_super_generation(info->super_copy) != cache_gen)
  1752. need_clear = 1;
  1753. if (btrfs_test_opt(info, CLEAR_CACHE))
  1754. need_clear = 1;
  1755. while (1) {
  1756. ret = find_first_block_group(info, path, &key);
  1757. if (ret > 0)
  1758. break;
  1759. if (ret != 0)
  1760. goto error;
  1761. btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
  1762. ret = read_one_block_group(info, path, &key, need_clear);
  1763. if (ret < 0)
  1764. goto error;
  1765. key.objectid += key.offset;
  1766. key.offset = 0;
  1767. btrfs_release_path(path);
  1768. }
  1769. btrfs_release_path(path);
  1770. list_for_each_entry(space_info, &info->space_info, list) {
  1771. int i;
  1772. for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
  1773. if (list_empty(&space_info->block_groups[i]))
  1774. continue;
  1775. cache = list_first_entry(&space_info->block_groups[i],
  1776. struct btrfs_block_group,
  1777. list);
  1778. btrfs_sysfs_add_block_group_type(cache);
  1779. }
  1780. if (!(btrfs_get_alloc_profile(info, space_info->flags) &
  1781. (BTRFS_BLOCK_GROUP_RAID10 |
  1782. BTRFS_BLOCK_GROUP_RAID1_MASK |
  1783. BTRFS_BLOCK_GROUP_RAID56_MASK |
  1784. BTRFS_BLOCK_GROUP_DUP)))
  1785. continue;
  1786. /*
  1787. * Avoid allocating from un-mirrored block group if there are
  1788. * mirrored block groups.
  1789. */
  1790. list_for_each_entry(cache,
  1791. &space_info->block_groups[BTRFS_RAID_RAID0],
  1792. list)
  1793. inc_block_group_ro(cache, 1);
  1794. list_for_each_entry(cache,
  1795. &space_info->block_groups[BTRFS_RAID_SINGLE],
  1796. list)
  1797. inc_block_group_ro(cache, 1);
  1798. }
  1799. btrfs_init_global_block_rsv(info);
  1800. ret = check_chunk_block_group_mappings(info);
  1801. error:
  1802. btrfs_free_path(path);
  1803. return ret;
  1804. }
  1805. static int insert_block_group_item(struct btrfs_trans_handle *trans,
  1806. struct btrfs_block_group *block_group)
  1807. {
  1808. struct btrfs_fs_info *fs_info = trans->fs_info;
  1809. struct btrfs_block_group_item bgi;
  1810. struct btrfs_root *root;
  1811. struct btrfs_key key;
  1812. spin_lock(&block_group->lock);
  1813. btrfs_set_stack_block_group_used(&bgi, block_group->used);
  1814. btrfs_set_stack_block_group_chunk_objectid(&bgi,
  1815. BTRFS_FIRST_CHUNK_TREE_OBJECTID);
  1816. btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
  1817. key.objectid = block_group->start;
  1818. key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
  1819. key.offset = block_group->length;
  1820. spin_unlock(&block_group->lock);
  1821. root = fs_info->extent_root;
  1822. return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
  1823. }
  1824. void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
  1825. {
  1826. struct btrfs_fs_info *fs_info = trans->fs_info;
  1827. struct btrfs_block_group *block_group;
  1828. int ret = 0;
  1829. if (!trans->can_flush_pending_bgs)
  1830. return;
  1831. while (!list_empty(&trans->new_bgs)) {
  1832. int index;
  1833. block_group = list_first_entry(&trans->new_bgs,
  1834. struct btrfs_block_group,
  1835. bg_list);
  1836. if (ret)
  1837. goto next;
  1838. index = btrfs_bg_flags_to_raid_index(block_group->flags);
  1839. ret = insert_block_group_item(trans, block_group);
  1840. if (ret)
  1841. btrfs_abort_transaction(trans, ret);
  1842. ret = btrfs_finish_chunk_alloc(trans, block_group->start,
  1843. block_group->length);
  1844. if (ret)
  1845. btrfs_abort_transaction(trans, ret);
  1846. add_block_group_free_space(trans, block_group);
  1847. /*
  1848. * If we restriped during balance, we may have added a new raid
  1849. * type, so now add the sysfs entries when it is safe to do so.
  1850. * We don't have to worry about locking here as it's handled in
  1851. * btrfs_sysfs_add_block_group_type.
  1852. */
  1853. if (block_group->space_info->block_group_kobjs[index] == NULL)
  1854. btrfs_sysfs_add_block_group_type(block_group);
  1855. /* Already aborted the transaction if it failed. */
  1856. next:
  1857. btrfs_delayed_refs_rsv_release(fs_info, 1);
  1858. list_del_init(&block_group->bg_list);
  1859. }
  1860. btrfs_trans_release_chunk_metadata(trans);
  1861. }
  1862. int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
  1863. u64 type, u64 chunk_offset, u64 size)
  1864. {
  1865. struct btrfs_fs_info *fs_info = trans->fs_info;
  1866. struct btrfs_block_group *cache;
  1867. int ret;
  1868. btrfs_set_log_full_commit(trans);
  1869. cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
  1870. if (!cache)
  1871. return -ENOMEM;
  1872. cache->length = size;
  1873. set_free_space_tree_thresholds(cache);
  1874. cache->used = bytes_used;
  1875. cache->flags = type;
  1876. cache->last_byte_to_unpin = (u64)-1;
  1877. cache->cached = BTRFS_CACHE_FINISHED;
  1878. cache->needs_free_space = 1;
  1879. ret = exclude_super_stripes(cache);
  1880. if (ret) {
  1881. /* We may have excluded something, so call this just in case */
  1882. btrfs_free_excluded_extents(cache);
  1883. btrfs_put_block_group(cache);
  1884. return ret;
  1885. }
  1886. add_new_free_space(cache, chunk_offset, chunk_offset + size);
  1887. btrfs_free_excluded_extents(cache);
  1888. #ifdef CONFIG_BTRFS_DEBUG
  1889. if (btrfs_should_fragment_free_space(cache)) {
  1890. u64 new_bytes_used = size - bytes_used;
  1891. bytes_used += new_bytes_used >> 1;
  1892. fragment_free_space(cache);
  1893. }
  1894. #endif
  1895. /*
  1896. * Ensure the corresponding space_info object is created and
  1897. * assigned to our block group. We want our bg to be added to the rbtree
  1898. * with its ->space_info set.
  1899. */
  1900. cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
  1901. ASSERT(cache->space_info);
  1902. ret = btrfs_add_block_group_cache(fs_info, cache);
  1903. if (ret) {
  1904. btrfs_remove_free_space_cache(cache);
  1905. btrfs_put_block_group(cache);
  1906. return ret;
  1907. }
  1908. /*
  1909. * Now that our block group has its ->space_info set and is inserted in
  1910. * the rbtree, update the space info's counters.
  1911. */
  1912. trace_btrfs_add_block_group(fs_info, cache, 1);
  1913. btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
  1914. cache->bytes_super, &cache->space_info);
  1915. btrfs_update_global_block_rsv(fs_info);
  1916. link_block_group(cache);
  1917. list_add_tail(&cache->bg_list, &trans->new_bgs);
  1918. trans->delayed_ref_updates++;
  1919. btrfs_update_delayed_refs_rsv(trans);
  1920. set_avail_alloc_bits(fs_info, type);
  1921. return 0;
  1922. }
  1923. /*
  1924. * Mark one block group RO, can be called several times for the same block
  1925. * group.
  1926. *
  1927. * @cache: the destination block group
  1928. * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to
  1929. * ensure we still have some free space after marking this
  1930. * block group RO.
  1931. */
  1932. int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
  1933. bool do_chunk_alloc)
  1934. {
  1935. struct btrfs_fs_info *fs_info = cache->fs_info;
  1936. struct btrfs_trans_handle *trans;
  1937. u64 alloc_flags;
  1938. int ret;
  1939. again:
  1940. trans = btrfs_join_transaction(fs_info->extent_root);
  1941. if (IS_ERR(trans))
  1942. return PTR_ERR(trans);
  1943. /*
  1944. * we're not allowed to set block groups readonly after the dirty
  1945. * block groups cache has started writing. If it already started,
  1946. * back off and let this transaction commit
  1947. */
  1948. mutex_lock(&fs_info->ro_block_group_mutex);
  1949. if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
  1950. u64 transid = trans->transid;
  1951. mutex_unlock(&fs_info->ro_block_group_mutex);
  1952. btrfs_end_transaction(trans);
  1953. ret = btrfs_wait_for_commit(fs_info, transid);
  1954. if (ret)
  1955. return ret;
  1956. goto again;
  1957. }
  1958. if (do_chunk_alloc) {
  1959. /*
  1960. * If we are changing raid levels, try to allocate a
  1961. * corresponding block group with the new raid level.
  1962. */
  1963. alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
  1964. if (alloc_flags != cache->flags) {
  1965. ret = btrfs_chunk_alloc(trans, alloc_flags,
  1966. CHUNK_ALLOC_FORCE);
  1967. /*
  1968. * ENOSPC is allowed here, we may have enough space
  1969. * already allocated at the new raid level to carry on
  1970. */
  1971. if (ret == -ENOSPC)
  1972. ret = 0;
  1973. if (ret < 0)
  1974. goto out;
  1975. }
  1976. }
  1977. ret = inc_block_group_ro(cache, 0);
  1978. if (!do_chunk_alloc || ret == -ETXTBSY)
  1979. goto unlock_out;
  1980. if (!ret)
  1981. goto out;
  1982. alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
  1983. ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
  1984. if (ret < 0)
  1985. goto out;
  1986. ret = inc_block_group_ro(cache, 0);
  1987. if (ret == -ETXTBSY)
  1988. goto unlock_out;
  1989. out:
  1990. if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
  1991. alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
  1992. mutex_lock(&fs_info->chunk_mutex);
  1993. check_system_chunk(trans, alloc_flags);
  1994. mutex_unlock(&fs_info->chunk_mutex);
  1995. }
  1996. unlock_out:
  1997. mutex_unlock(&fs_info->ro_block_group_mutex);
  1998. btrfs_end_transaction(trans);
  1999. return ret;
  2000. }
  2001. void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
  2002. {
  2003. struct btrfs_space_info *sinfo = cache->space_info;
  2004. u64 num_bytes;
  2005. BUG_ON(!cache->ro);
  2006. spin_lock(&sinfo->lock);
  2007. spin_lock(&cache->lock);
  2008. if (!--cache->ro) {
  2009. num_bytes = cache->length - cache->reserved -
  2010. cache->pinned - cache->bytes_super - cache->used;
  2011. sinfo->bytes_readonly -= num_bytes;
  2012. list_del_init(&cache->ro_list);
  2013. }
  2014. spin_unlock(&cache->lock);
  2015. spin_unlock(&sinfo->lock);
  2016. }
  2017. static int update_block_group_item(struct btrfs_trans_handle *trans,
  2018. struct btrfs_path *path,
  2019. struct btrfs_block_group *cache)
  2020. {
  2021. struct btrfs_fs_info *fs_info = trans->fs_info;
  2022. int ret;
  2023. struct btrfs_root *root = fs_info->extent_root;
  2024. unsigned long bi;
  2025. struct extent_buffer *leaf;
  2026. struct btrfs_block_group_item bgi;
  2027. struct btrfs_key key;
  2028. key.objectid = cache->start;
  2029. key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
  2030. key.offset = cache->length;
  2031. ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
  2032. if (ret) {
  2033. if (ret > 0)
  2034. ret = -ENOENT;
  2035. goto fail;
  2036. }
  2037. leaf = path->nodes[0];
  2038. bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
  2039. btrfs_set_stack_block_group_used(&bgi, cache->used);
  2040. btrfs_set_stack_block_group_chunk_objectid(&bgi,
  2041. BTRFS_FIRST_CHUNK_TREE_OBJECTID);
  2042. btrfs_set_stack_block_group_flags(&bgi, cache->flags);
  2043. write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
  2044. btrfs_mark_buffer_dirty(leaf);
  2045. fail:
  2046. btrfs_release_path(path);
  2047. return ret;
  2048. }
  2049. static int cache_save_setup(struct btrfs_block_group *block_group,
  2050. struct btrfs_trans_handle *trans,
  2051. struct btrfs_path *path)
  2052. {
  2053. struct btrfs_fs_info *fs_info = block_group->fs_info;
  2054. struct btrfs_root *root = fs_info->tree_root;
  2055. struct inode *inode = NULL;
  2056. struct extent_changeset *data_reserved = NULL;
  2057. u64 alloc_hint = 0;
  2058. int dcs = BTRFS_DC_ERROR;
  2059. u64 num_pages = 0;
  2060. int retries = 0;
  2061. int ret = 0;
  2062. /*
  2063. * If this block group is smaller than 100 megs don't bother caching the
  2064. * block group.
  2065. */
  2066. if (block_group->length < (100 * SZ_1M)) {
  2067. spin_lock(&block_group->lock);
  2068. block_group->disk_cache_state = BTRFS_DC_WRITTEN;
  2069. spin_unlock(&block_group->lock);
  2070. return 0;
  2071. }
  2072. if (TRANS_ABORTED(trans))
  2073. return 0;
  2074. again:
  2075. inode = lookup_free_space_inode(block_group, path);
  2076. if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
  2077. ret = PTR_ERR(inode);
  2078. btrfs_release_path(path);
  2079. goto out;
  2080. }
  2081. if (IS_ERR(inode)) {
  2082. BUG_ON(retries);
  2083. retries++;
  2084. if (block_group->ro)
  2085. goto out_free;
  2086. ret = create_free_space_inode(trans, block_group, path);
  2087. if (ret)
  2088. goto out_free;
  2089. goto again;
  2090. }
  2091. /*
  2092. * We want to set the generation to 0, that way if anything goes wrong
  2093. * from here on out we know not to trust this cache when we load up next
  2094. * time.
  2095. */
  2096. BTRFS_I(inode)->generation = 0;
  2097. ret = btrfs_update_inode(trans, root, inode);
  2098. if (ret) {
  2099. /*
  2100. * So theoretically we could recover from this, simply set the
  2101. * super cache generation to 0 so we know to invalidate the
  2102. * cache, but then we'd have to keep track of the block groups
  2103. * that fail this way so we know we _have_ to reset this cache
  2104. * before the next commit or risk reading stale cache. So to
  2105. * limit our exposure to horrible edge cases lets just abort the
  2106. * transaction, this only happens in really bad situations
  2107. * anyway.
  2108. */
  2109. btrfs_abort_transaction(trans, ret);
  2110. goto out_put;
  2111. }
  2112. WARN_ON(ret);
  2113. /* We've already setup this transaction, go ahead and exit */
  2114. if (block_group->cache_generation == trans->transid &&
  2115. i_size_read(inode)) {
  2116. dcs = BTRFS_DC_SETUP;
  2117. goto out_put;
  2118. }
  2119. if (i_size_read(inode) > 0) {
  2120. ret = btrfs_check_trunc_cache_free_space(fs_info,
  2121. &fs_info->global_block_rsv);
  2122. if (ret)
  2123. goto out_put;
  2124. ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
  2125. if (ret)
  2126. goto out_put;
  2127. }
  2128. spin_lock(&block_group->lock);
  2129. if (block_group->cached != BTRFS_CACHE_FINISHED ||
  2130. !btrfs_test_opt(fs_info, SPACE_CACHE)) {
  2131. /*
  2132. * don't bother trying to write stuff out _if_
  2133. * a) we're not cached,
  2134. * b) we're with nospace_cache mount option,
  2135. * c) we're with v2 space_cache (FREE_SPACE_TREE).
  2136. */
  2137. dcs = BTRFS_DC_WRITTEN;
  2138. spin_unlock(&block_group->lock);
  2139. goto out_put;
  2140. }
  2141. spin_unlock(&block_group->lock);
  2142. /*
  2143. * We hit an ENOSPC when setting up the cache in this transaction, just
  2144. * skip doing the setup, we've already cleared the cache so we're safe.
  2145. */
  2146. if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
  2147. ret = -ENOSPC;
  2148. goto out_put;
  2149. }
  2150. /*
  2151. * Try to preallocate enough space based on how big the block group is.
  2152. * Keep in mind this has to include any pinned space which could end up
  2153. * taking up quite a bit since it's not folded into the other space
  2154. * cache.
  2155. */
  2156. num_pages = div_u64(block_group->length, SZ_256M);
  2157. if (!num_pages)
  2158. num_pages = 1;
  2159. num_pages *= 16;
  2160. num_pages *= PAGE_SIZE;
  2161. ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0,
  2162. num_pages);
  2163. if (ret)
  2164. goto out_put;
  2165. ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
  2166. num_pages, num_pages,
  2167. &alloc_hint);
  2168. /*
  2169. * Our cache requires contiguous chunks so that we don't modify a bunch
  2170. * of metadata or split extents when writing the cache out, which means
  2171. * we can enospc if we are heavily fragmented in addition to just normal
  2172. * out of space conditions. So if we hit this just skip setting up any
  2173. * other block groups for this transaction, maybe we'll unpin enough
  2174. * space the next time around.
  2175. */
  2176. if (!ret)
  2177. dcs = BTRFS_DC_SETUP;
  2178. else if (ret == -ENOSPC)
  2179. set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
  2180. out_put:
  2181. iput(inode);
  2182. out_free:
  2183. btrfs_release_path(path);
  2184. out:
  2185. spin_lock(&block_group->lock);
  2186. if (!ret && dcs == BTRFS_DC_SETUP)
  2187. block_group->cache_generation = trans->transid;
  2188. block_group->disk_cache_state = dcs;
  2189. spin_unlock(&block_group->lock);
  2190. extent_changeset_free(data_reserved);
  2191. return ret;
  2192. }
  2193. int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
  2194. {
  2195. struct btrfs_fs_info *fs_info = trans->fs_info;
  2196. struct btrfs_block_group *cache, *tmp;
  2197. struct btrfs_transaction *cur_trans = trans->transaction;
  2198. struct btrfs_path *path;
  2199. if (list_empty(&cur_trans->dirty_bgs) ||
  2200. !btrfs_test_opt(fs_info, SPACE_CACHE))
  2201. return 0;
  2202. path = btrfs_alloc_path();
  2203. if (!path)
  2204. return -ENOMEM;
  2205. /* Could add new block groups, use _safe just in case */
  2206. list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
  2207. dirty_list) {
  2208. if (cache->disk_cache_state == BTRFS_DC_CLEAR)
  2209. cache_save_setup(cache, trans, path);
  2210. }
  2211. btrfs_free_path(path);
  2212. return 0;
  2213. }
  2214. /*
  2215. * Transaction commit does final block group cache writeback during a critical
  2216. * section where nothing is allowed to change the FS. This is required in
  2217. * order for the cache to actually match the block group, but can introduce a
  2218. * lot of latency into the commit.
  2219. *
  2220. * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
  2221. * There's a chance we'll have to redo some of it if the block group changes
  2222. * again during the commit, but it greatly reduces the commit latency by
  2223. * getting rid of the easy block groups while we're still allowing others to
  2224. * join the commit.
  2225. */
  2226. int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
  2227. {
  2228. struct btrfs_fs_info *fs_info = trans->fs_info;
  2229. struct btrfs_block_group *cache;
  2230. struct btrfs_transaction *cur_trans = trans->transaction;
  2231. int ret = 0;
  2232. int should_put;
  2233. struct btrfs_path *path = NULL;
  2234. LIST_HEAD(dirty);
  2235. struct list_head *io = &cur_trans->io_bgs;
  2236. int loops = 0;
  2237. spin_lock(&cur_trans->dirty_bgs_lock);
  2238. if (list_empty(&cur_trans->dirty_bgs)) {
  2239. spin_unlock(&cur_trans->dirty_bgs_lock);
  2240. return 0;
  2241. }
  2242. list_splice_init(&cur_trans->dirty_bgs, &dirty);
  2243. spin_unlock(&cur_trans->dirty_bgs_lock);
  2244. again:
  2245. /* Make sure all the block groups on our dirty list actually exist */
  2246. btrfs_create_pending_block_groups(trans);
  2247. if (!path) {
  2248. path = btrfs_alloc_path();
  2249. if (!path) {
  2250. ret = -ENOMEM;
  2251. goto out;
  2252. }
  2253. }
  2254. /*
  2255. * cache_write_mutex is here only to save us from balance or automatic
  2256. * removal of empty block groups deleting this block group while we are
  2257. * writing out the cache
  2258. */
  2259. mutex_lock(&trans->transaction->cache_write_mutex);
  2260. while (!list_empty(&dirty)) {
  2261. bool drop_reserve = true;
  2262. cache = list_first_entry(&dirty, struct btrfs_block_group,
  2263. dirty_list);
  2264. /*
  2265. * This can happen if something re-dirties a block group that
  2266. * is already under IO. Just wait for it to finish and then do
  2267. * it all again
  2268. */
  2269. if (!list_empty(&cache->io_list)) {
  2270. list_del_init(&cache->io_list);
  2271. btrfs_wait_cache_io(trans, cache, path);
  2272. btrfs_put_block_group(cache);
  2273. }
  2274. /*
  2275. * btrfs_wait_cache_io uses the cache->dirty_list to decide if
  2276. * it should update the cache_state. Don't delete until after
  2277. * we wait.
  2278. *
  2279. * Since we're not running in the commit critical section
  2280. * we need the dirty_bgs_lock to protect from update_block_group
  2281. */
  2282. spin_lock(&cur_trans->dirty_bgs_lock);
  2283. list_del_init(&cache->dirty_list);
  2284. spin_unlock(&cur_trans->dirty_bgs_lock);
  2285. should_put = 1;
  2286. cache_save_setup(cache, trans, path);
  2287. if (cache->disk_cache_state == BTRFS_DC_SETUP) {
  2288. cache->io_ctl.inode = NULL;
  2289. ret = btrfs_write_out_cache(trans, cache, path);
  2290. if (ret == 0 && cache->io_ctl.inode) {
  2291. should_put = 0;
  2292. /*
  2293. * The cache_write_mutex is protecting the
  2294. * io_list, also refer to the definition of
  2295. * btrfs_transaction::io_bgs for more details
  2296. */
  2297. list_add_tail(&cache->io_list, io);
  2298. } else {
  2299. /*
  2300. * If we failed to write the cache, the
  2301. * generation will be bad and life goes on
  2302. */
  2303. ret = 0;
  2304. }
  2305. }
  2306. if (!ret) {
  2307. ret = update_block_group_item(trans, path, cache);
  2308. /*
  2309. * Our block group might still be attached to the list
  2310. * of new block groups in the transaction handle of some
  2311. * other task (struct btrfs_trans_handle->new_bgs). This
  2312. * means its block group item isn't yet in the extent
  2313. * tree. If this happens ignore the error, as we will
  2314. * try again later in the critical section of the
  2315. * transaction commit.
  2316. */
  2317. if (ret == -ENOENT) {
  2318. ret = 0;
  2319. spin_lock(&cur_trans->dirty_bgs_lock);
  2320. if (list_empty(&cache->dirty_list)) {
  2321. list_add_tail(&cache->dirty_list,
  2322. &cur_trans->dirty_bgs);
  2323. btrfs_get_block_group(cache);
  2324. drop_reserve = false;
  2325. }
  2326. spin_unlock(&cur_trans->dirty_bgs_lock);
  2327. } else if (ret) {
  2328. btrfs_abort_transaction(trans, ret);
  2329. }
  2330. }
  2331. /* If it's not on the io list, we need to put the block group */
  2332. if (should_put)
  2333. btrfs_put_block_group(cache);
  2334. if (drop_reserve)
  2335. btrfs_delayed_refs_rsv_release(fs_info, 1);
  2336. /*
  2337. * Avoid blocking other tasks for too long. It might even save
  2338. * us from writing caches for block groups that are going to be
  2339. * removed.
  2340. */
  2341. mutex_unlock(&trans->transaction->cache_write_mutex);
  2342. if (ret)
  2343. goto out;
  2344. mutex_lock(&trans->transaction->cache_write_mutex);
  2345. }
  2346. mutex_unlock(&trans->transaction->cache_write_mutex);
  2347. /*
  2348. * Go through delayed refs for all the stuff we've just kicked off
  2349. * and then loop back (just once)
  2350. */
  2351. if (!ret)
  2352. ret = btrfs_run_delayed_refs(trans, 0);
  2353. if (!ret && loops == 0) {
  2354. loops++;
  2355. spin_lock(&cur_trans->dirty_bgs_lock);
  2356. list_splice_init(&cur_trans->dirty_bgs, &dirty);
  2357. /*
  2358. * dirty_bgs_lock protects us from concurrent block group
  2359. * deletes too (not just cache_write_mutex).
  2360. */
  2361. if (!list_empty(&dirty)) {
  2362. spin_unlock(&cur_trans->dirty_bgs_lock);
  2363. goto again;
  2364. }
  2365. spin_unlock(&cur_trans->dirty_bgs_lock);
  2366. }
  2367. out:
  2368. if (ret < 0) {
  2369. spin_lock(&cur_trans->dirty_bgs_lock);
  2370. list_splice_init(&dirty, &cur_trans->dirty_bgs);
  2371. spin_unlock(&cur_trans->dirty_bgs_lock);
  2372. btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
  2373. }
  2374. btrfs_free_path(path);
  2375. return ret;
  2376. }
  2377. int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
  2378. {
  2379. struct btrfs_fs_info *fs_info = trans->fs_info;
  2380. struct btrfs_block_group *cache;
  2381. struct btrfs_transaction *cur_trans = trans->transaction;
  2382. int ret = 0;
  2383. int should_put;
  2384. struct btrfs_path *path;
  2385. struct list_head *io = &cur_trans->io_bgs;
  2386. path = btrfs_alloc_path();
  2387. if (!path)
  2388. return -ENOMEM;
  2389. /*
  2390. * Even though we are in the critical section of the transaction commit,
  2391. * we can still have concurrent tasks adding elements to this
  2392. * transaction's list of dirty block groups. These tasks correspond to
  2393. * endio free space workers started when writeback finishes for a
  2394. * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
  2395. * allocate new block groups as a result of COWing nodes of the root
  2396. * tree when updating the free space inode. The writeback for the space
  2397. * caches is triggered by an earlier call to
  2398. * btrfs_start_dirty_block_groups() and iterations of the following
  2399. * loop.
  2400. * Also we want to do the cache_save_setup first and then run the
  2401. * delayed refs to make sure we have the best chance at doing this all
  2402. * in one shot.
  2403. */
  2404. spin_lock(&cur_trans->dirty_bgs_lock);
  2405. while (!list_empty(&cur_trans->dirty_bgs)) {
  2406. cache = list_first_entry(&cur_trans->dirty_bgs,
  2407. struct btrfs_block_group,
  2408. dirty_list);
  2409. /*
  2410. * This can happen if cache_save_setup re-dirties a block group
  2411. * that is already under IO. Just wait for it to finish and
  2412. * then do it all again
  2413. */
  2414. if (!list_empty(&cache->io_list)) {
  2415. spin_unlock(&cur_trans->dirty_bgs_lock);
  2416. list_del_init(&cache->io_list);
  2417. btrfs_wait_cache_io(trans, cache, path);
  2418. btrfs_put_block_group(cache);
  2419. spin_lock(&cur_trans->dirty_bgs_lock);
  2420. }
  2421. /*
  2422. * Don't remove from the dirty list until after we've waited on
  2423. * any pending IO
  2424. */
  2425. list_del_init(&cache->dirty_list);
  2426. spin_unlock(&cur_trans->dirty_bgs_lock);
  2427. should_put = 1;
  2428. cache_save_setup(cache, trans, path);
  2429. if (!ret)
  2430. ret = btrfs_run_delayed_refs(trans,
  2431. (unsigned long) -1);
  2432. if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
  2433. cache->io_ctl.inode = NULL;
  2434. ret = btrfs_write_out_cache(trans, cache, path);
  2435. if (ret == 0 && cache->io_ctl.inode) {
  2436. should_put = 0;
  2437. list_add_tail(&cache->io_list, io);
  2438. } else {
  2439. /*
  2440. * If we failed to write the cache, the
  2441. * generation will be bad and life goes on
  2442. */
  2443. ret = 0;
  2444. }
  2445. }
  2446. if (!ret) {
  2447. ret = update_block_group_item(trans, path, cache);
  2448. /*
  2449. * One of the free space endio workers might have
  2450. * created a new block group while updating a free space
  2451. * cache's inode (at inode.c:btrfs_finish_ordered_io())
  2452. * and hasn't released its transaction handle yet, in
  2453. * which case the new block group is still attached to
  2454. * its transaction handle and its creation has not
  2455. * finished yet (no block group item in the extent tree
  2456. * yet, etc). If this is the case, wait for all free
  2457. * space endio workers to finish and retry. This is a
  2458. * very rare case so no need for a more efficient and
  2459. * complex approach.
  2460. */
  2461. if (ret == -ENOENT) {
  2462. wait_event(cur_trans->writer_wait,
  2463. atomic_read(&cur_trans->num_writers) == 1);
  2464. ret = update_block_group_item(trans, path, cache);
  2465. }
  2466. if (ret)
  2467. btrfs_abort_transaction(trans, ret);
  2468. }
  2469. /* If its not on the io list, we need to put the block group */
  2470. if (should_put)
  2471. btrfs_put_block_group(cache);
  2472. btrfs_delayed_refs_rsv_release(fs_info, 1);
  2473. spin_lock(&cur_trans->dirty_bgs_lock);
  2474. }
  2475. spin_unlock(&cur_trans->dirty_bgs_lock);
  2476. /*
  2477. * Refer to the definition of io_bgs member for details why it's safe
  2478. * to use it without any locking
  2479. */
  2480. while (!list_empty(io)) {
  2481. cache = list_first_entry(io, struct btrfs_block_group,
  2482. io_list);
  2483. list_del_init(&cache->io_list);
  2484. btrfs_wait_cache_io(trans, cache, path);
  2485. btrfs_put_block_group(cache);
  2486. }
  2487. btrfs_free_path(path);
  2488. return ret;
  2489. }
  2490. int btrfs_update_block_group(struct btrfs_trans_handle *trans,
  2491. u64 bytenr, u64 num_bytes, int alloc)
  2492. {
  2493. struct btrfs_fs_info *info = trans->fs_info;
  2494. struct btrfs_block_group *cache = NULL;
  2495. u64 total = num_bytes;
  2496. u64 old_val;
  2497. u64 byte_in_group;
  2498. int factor;
  2499. int ret = 0;
  2500. /* Block accounting for super block */
  2501. spin_lock(&info->delalloc_root_lock);
  2502. old_val = btrfs_super_bytes_used(info->super_copy);
  2503. if (alloc)
  2504. old_val += num_bytes;
  2505. else
  2506. old_val -= num_bytes;
  2507. btrfs_set_super_bytes_used(info->super_copy, old_val);
  2508. spin_unlock(&info->delalloc_root_lock);
  2509. while (total) {
  2510. cache = btrfs_lookup_block_group(info, bytenr);
  2511. if (!cache) {
  2512. ret = -ENOENT;
  2513. break;
  2514. }
  2515. factor = btrfs_bg_type_to_factor(cache->flags);
  2516. /*
  2517. * If this block group has free space cache written out, we
  2518. * need to make sure to load it if we are removing space. This
  2519. * is because we need the unpinning stage to actually add the
  2520. * space back to the block group, otherwise we will leak space.
  2521. */
  2522. if (!alloc && !btrfs_block_group_done(cache))
  2523. btrfs_cache_block_group(cache, 1);
  2524. byte_in_group = bytenr - cache->start;
  2525. WARN_ON(byte_in_group > cache->length);
  2526. spin_lock(&cache->space_info->lock);
  2527. spin_lock(&cache->lock);
  2528. if (btrfs_test_opt(info, SPACE_CACHE) &&
  2529. cache->disk_cache_state < BTRFS_DC_CLEAR)
  2530. cache->disk_cache_state = BTRFS_DC_CLEAR;
  2531. old_val = cache->used;
  2532. num_bytes = min(total, cache->length - byte_in_group);
  2533. if (alloc) {
  2534. old_val += num_bytes;
  2535. cache->used = old_val;
  2536. cache->reserved -= num_bytes;
  2537. cache->space_info->bytes_reserved -= num_bytes;
  2538. cache->space_info->bytes_used += num_bytes;
  2539. cache->space_info->disk_used += num_bytes * factor;
  2540. spin_unlock(&cache->lock);
  2541. spin_unlock(&cache->space_info->lock);
  2542. } else {
  2543. old_val -= num_bytes;
  2544. cache->used = old_val;
  2545. cache->pinned += num_bytes;
  2546. btrfs_space_info_update_bytes_pinned(info,
  2547. cache->space_info, num_bytes);
  2548. cache->space_info->bytes_used -= num_bytes;
  2549. cache->space_info->disk_used -= num_bytes * factor;
  2550. spin_unlock(&cache->lock);
  2551. spin_unlock(&cache->space_info->lock);
  2552. __btrfs_mod_total_bytes_pinned(cache->space_info,
  2553. num_bytes);
  2554. set_extent_dirty(&trans->transaction->pinned_extents,
  2555. bytenr, bytenr + num_bytes - 1,
  2556. GFP_NOFS | __GFP_NOFAIL);
  2557. }
  2558. spin_lock(&trans->transaction->dirty_bgs_lock);
  2559. if (list_empty(&cache->dirty_list)) {
  2560. list_add_tail(&cache->dirty_list,
  2561. &trans->transaction->dirty_bgs);
  2562. trans->delayed_ref_updates++;
  2563. btrfs_get_block_group(cache);
  2564. }
  2565. spin_unlock(&trans->transaction->dirty_bgs_lock);
  2566. /*
  2567. * No longer have used bytes in this block group, queue it for
  2568. * deletion. We do this after adding the block group to the
  2569. * dirty list to avoid races between cleaner kthread and space
  2570. * cache writeout.
  2571. */
  2572. if (!alloc && old_val == 0) {
  2573. if (!btrfs_test_opt(info, DISCARD_ASYNC))
  2574. btrfs_mark_bg_unused(cache);
  2575. }
  2576. btrfs_put_block_group(cache);
  2577. total -= num_bytes;
  2578. bytenr += num_bytes;
  2579. }
  2580. /* Modified block groups are accounted for in the delayed_refs_rsv. */
  2581. btrfs_update_delayed_refs_rsv(trans);
  2582. return ret;
  2583. }
  2584. /**
  2585. * btrfs_add_reserved_bytes - update the block_group and space info counters
  2586. * @cache: The cache we are manipulating
  2587. * @ram_bytes: The number of bytes of file content, and will be same to
  2588. * @num_bytes except for the compress path.
  2589. * @num_bytes: The number of bytes in question
  2590. * @delalloc: The blocks are allocated for the delalloc write
  2591. *
  2592. * This is called by the allocator when it reserves space. If this is a
  2593. * reservation and the block group has become read only we cannot make the
  2594. * reservation and return -EAGAIN, otherwise this function always succeeds.
  2595. */
  2596. int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
  2597. u64 ram_bytes, u64 num_bytes, int delalloc)
  2598. {
  2599. struct btrfs_space_info *space_info = cache->space_info;
  2600. int ret = 0;
  2601. spin_lock(&space_info->lock);
  2602. spin_lock(&cache->lock);
  2603. if (cache->ro) {
  2604. ret = -EAGAIN;
  2605. } else {
  2606. cache->reserved += num_bytes;
  2607. space_info->bytes_reserved += num_bytes;
  2608. trace_btrfs_space_reservation(cache->fs_info, "space_info",
  2609. space_info->flags, num_bytes, 1);
  2610. btrfs_space_info_update_bytes_may_use(cache->fs_info,
  2611. space_info, -ram_bytes);
  2612. if (delalloc)
  2613. cache->delalloc_bytes += num_bytes;
  2614. /*
  2615. * Compression can use less space than we reserved, so wake
  2616. * tickets if that happens
  2617. */
  2618. if (num_bytes < ram_bytes)
  2619. btrfs_try_granting_tickets(cache->fs_info, space_info);
  2620. }
  2621. spin_unlock(&cache->lock);
  2622. spin_unlock(&space_info->lock);
  2623. return ret;
  2624. }
/**
 * btrfs_free_reserved_bytes - update the block_group and space info counters
 * @cache: The cache we are manipulating
 * @num_bytes: The number of bytes in question
 * @delalloc: The blocks are allocated for the delalloc write
 *
 * This is called by somebody who is freeing space that was never actually used
 * on disk. For example if you reserve some space for a new leaf in transaction
 * A and before transaction A commits you free that leaf, you call this in
 * order to clear the reservation.
 */
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro)
		space_info->bytes_readonly += num_bytes;
	cache->reserved -= num_bytes;
	space_info->bytes_reserved -= num_bytes;
	space_info->max_extent_size = 0;

	if (delalloc)
		cache->delalloc_bytes -= num_bytes;
	spin_unlock(&cache->lock);

	btrfs_try_granting_tickets(cache->fs_info, space_info);
	spin_unlock(&space_info->lock);
}
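
/*
 * Mark every metadata space_info so that the next allocation attempt against
 * it forces a new chunk to be allocated (the force level is picked up by
 * btrfs_chunk_alloc() via space_info->force_alloc).
 */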
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
}
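
/*
 * Decide whether a new chunk should be allocated for @sinfo at the given
 * @force level:
 *   - CHUNK_ALLOC_FORCE: always allocate.
 *   - CHUNK_ALLOC_LIMITED: allocate once free space in @sinfo drops below
 *     max(64M, ~1% of the filesystem size).
 *   - otherwise: allocate only once roughly 80% of @sinfo's space is used.
 */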
static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
			      struct btrfs_space_info *sinfo, int force)
{
	u64 bytes_used = btrfs_space_info_used(sinfo, false);
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(fs_info->super_copy);
		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));

		if (sinfo->total_bytes - bytes_used < thresh)
			return 1;
	}

	if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
		return 0;
	return 1;
}
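
/*
 * Force allocation of a new chunk for the block group @type, using whatever
 * allocation profile is currently enabled for that type.
 */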
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
{
	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);

	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
}
/*
 * If force is CHUNK_ALLOC_FORCE:
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 * If force is NOT CHUNK_ALLOC_FORCE:
 *    - return 0 if it doesn't need to allocate a new chunk,
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 */
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *space_info;
	bool wait_for_alloc = false;
	bool should_alloc = false;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = btrfs_find_space_info(fs_info, flags);
	ASSERT(space_info);

	do {
		spin_lock(&space_info->lock);
		if (force < space_info->force_alloc)
			force = space_info->force_alloc;
		should_alloc = should_alloc_chunk(fs_info, space_info, force);
		if (space_info->full) {
			/* No more free physical space */
			if (should_alloc)
				ret = -ENOSPC;
			else
				ret = 0;
			spin_unlock(&space_info->lock);
			return ret;
		} else if (!should_alloc) {
			spin_unlock(&space_info->lock);
			return 0;
		} else if (space_info->chunk_alloc) {
			/*
			 * Someone is already allocating, so we need to block
			 * until this someone is finished and then loop to
			 * recheck if we should continue with our allocation
			 * attempt.
			 */
			wait_for_alloc = true;
			spin_unlock(&space_info->lock);
			mutex_lock(&fs_info->chunk_mutex);
			mutex_unlock(&fs_info->chunk_mutex);
		} else {
			/* Proceed with allocation */
			space_info->chunk_alloc = 1;
			wait_for_alloc = false;
			spin_unlock(&space_info->lock);
		}

		cond_resched();
	} while (wait_for_alloc);

	mutex_lock(&fs_info->chunk_mutex);
	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in SYSTEM chunk because we may need
	 * to update devices.
	 */
	check_system_chunk(trans, flags);

	ret = btrfs_alloc_chunk(trans, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0) {
		if (ret == -ENOSPC)
			space_info->full = 1;
		else
			goto out;
	} else {
		ret = 1;
		space_info->max_extent_size = 0;
	}

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);

	/*
	 * When we allocate a new chunk we reserve space in the chunk block
	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
	 * add new nodes/leafs to it if we end up needing to do it when
	 * inserting the chunk item and updating device items as part of the
	 * second phase of chunk allocation, performed by
	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
	 * large number of new block groups to create in our transaction
	 * handle's new_bgs list to avoid exhausting the chunk block reserve
	 * in extreme cases - like having a single transaction create many new
	 * block groups when starting to write out the free space caches of all
	 * the block groups that were made dirty during the lifetime of the
	 * transaction.
	 */
	if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
		btrfs_create_pending_block_groups(trans);

	return ret;
}
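
/*
 * Return the number of devices a chunk of the given @type will span: the
 * profile's devs_max if it has a fixed maximum, otherwise all writeable
 * devices.
 */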
static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
{
	u64 num_dev;

	num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
	if (!num_dev)
		num_dev = fs_info->fs_devices->rw_devices;

	return num_dev;
}
/*
 * Reserve space in the system space for allocating or removing a chunk.
 */
void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;
	int ret = 0;
	u64 num_devs;

	/*
	 * Needed because we can end up allocating a system chunk and because
	 * we need an atomic and race free space reservation in the chunk
	 * block reserve.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - btrfs_space_info_used(info, true);
	spin_unlock(&info->lock);

	num_devs = get_profile_num_devs(fs_info, type);

	/* num_devs device items to update and 1 chunk item to add or remove */
	thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
		btrfs_calc_insert_metadata_size(fs_info, 1);

	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, thresh, type);
		btrfs_dump_space_info(fs_info, info, 0, 0);
	}

	if (left < thresh) {
		u64 flags = btrfs_system_alloc_profile(fs_info);

		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leafs from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		ret = btrfs_alloc_chunk(trans, flags);
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(fs_info->chunk_root,
					  &fs_info->chunk_block_rsv,
					  thresh, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += thresh;
	}
}
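
/*
 * Walk all block groups and drop the reference each one may hold on its free
 * space cache inode (->inode, with ->iref set), typically as part of tearing
 * down the filesystem.
 */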
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			btrfs_wait_block_group_cache_done(block_group);
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = btrfs_next_block_group(block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		ASSERT(block_group->io_ctl.inode == NULL);
		iput(inode);
		last = block_group->start + block_group->length;
		btrfs_put_block_group(block_group);
	}
}
/*
 * Must be called only after stopping all workers, since we could have block
 * group caching kthreads running, and therefore they could race with us if we
 * freed the block groups before stopping them.
 */
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->commit_root_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		btrfs_put_caching_control(caching_ctl);
	}
	up_write(&info->commit_root_sem);

	spin_lock(&info->unused_bgs_lock);
	while (!list_empty(&info->unused_bgs)) {
		block_group = list_first_entry(&info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->unused_bgs_lock);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		RB_CLEAR_NODE(&block_group->cache_node);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			btrfs_free_excluded_extents(block_group);

		btrfs_remove_free_space_cache(block_group);
		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
		ASSERT(list_empty(&block_group->dirty_list));
		ASSERT(list_empty(&block_group->io_list));
		ASSERT(list_empty(&block_group->bg_list));
		ASSERT(refcount_read(&block_group->refs) == 1);
		ASSERT(block_group->swap_extents == 0);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	btrfs_release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);

		/*
		 * Do not hide this behind enospc_debug, this is actually
		 * important and indicates a real bug if this happens.
		 */
		if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_reserved > 0 ||
			    space_info->bytes_may_use > 0))
			btrfs_dump_space_info(info, space_info, 0, 0);
		WARN_ON(space_info->reclaim_size > 0);
		list_del(&space_info->list);
		btrfs_sysfs_remove_space_info(space_info);
	}
	return 0;
}
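
/*
 * Freezing pairs with btrfs_unfreeze_block_group(): while the freeze count is
 * elevated, the block group's chunk mapping and free space cache entries are
 * kept around even if the block group is removed; the final unfreeze of a
 * removed block group performs that deferred cleanup (see
 * btrfs_unfreeze_block_group() below).
 */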
void btrfs_freeze_block_group(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->frozen);
}
void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
		   block_group->removed);
	spin_unlock(&block_group->lock);

	if (cleanup) {
		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->start,
					   1);
		BUG_ON(!em); /* logic error, can't happen */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us and once for the tree */
		free_extent_map(em);
		free_extent_map(em);

		/*
		 * We may have left one free space entry, and other tasks
		 * trimming this block group may have left one entry each.
		 * Free them if any.
		 */
		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
	}
}
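
/*
 * Account one more active swap file extent in this block group. Returns false
 * if the block group is read only, in which case the swap extent must not be
 * activated; the counter is meant to keep the block group from being turned
 * read only (e.g. for relocation) while the swap file is in use.
 */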
bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
{
	bool ret = true;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		bg->swap_extents++;
	spin_unlock(&bg->lock);

	return ret;
}
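
/*
 * Drop @amount previously accounted swap file extents from this block group.
 * The group must not be read only and must have at least @amount extents
 * accounted, as the assertions below verify.
 */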
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
{
	spin_lock(&bg->lock);
	ASSERT(!bg->ro);
	ASSERT(bg->swap_extents >= amount);
	bg->swap_extents -= amount;
	spin_unlock(&bg->lock);
}