backref.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2011 STRATO. All rights reserved.
  4. */
  5. #include <linux/mm.h>
  6. #include <linux/rbtree.h>
  7. #include <trace/events/btrfs.h>
  8. #include "ctree.h"
  9. #include "disk-io.h"
  10. #include "backref.h"
  11. #include "ulist.h"
  12. #include "transaction.h"
  13. #include "delayed-ref.h"
  14. #include "locking.h"
  15. #include "misc.h"
  16. /* Just an arbitrary number so we can be sure this happened */
  17. #define BACKREF_FOUND_SHARED 6
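/* One (inode number, file offset) pair referencing the extent being resolved. */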
  18. struct extent_inode_elem {
  19. u64 inum;
  20. u64 offset;
  21. struct extent_inode_elem *next;
  22. };
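/*
 * Check whether the file extent item @fi (keyed by @key) covers the extent
 * offset we are resolving. Unless @ignore_offset is set or the extent is
 * compressed/encrypted/other-encoded, return 1 when @extent_item_pos falls
 * outside the referenced range. Otherwise allocate an extent_inode_elem
 * recording the inode number and file offset and prepend it to @eie.
 */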
  23. static int check_extent_in_eb(const struct btrfs_key *key,
  24. const struct extent_buffer *eb,
  25. const struct btrfs_file_extent_item *fi,
  26. u64 extent_item_pos,
  27. struct extent_inode_elem **eie,
  28. bool ignore_offset)
  29. {
  30. u64 offset = 0;
  31. struct extent_inode_elem *e;
  32. if (!ignore_offset &&
  33. !btrfs_file_extent_compression(eb, fi) &&
  34. !btrfs_file_extent_encryption(eb, fi) &&
  35. !btrfs_file_extent_other_encoding(eb, fi)) {
  36. u64 data_offset;
  37. u64 data_len;
  38. data_offset = btrfs_file_extent_offset(eb, fi);
  39. data_len = btrfs_file_extent_num_bytes(eb, fi);
  40. if (extent_item_pos < data_offset ||
  41. extent_item_pos >= data_offset + data_len)
  42. return 1;
  43. offset = extent_item_pos - data_offset;
  44. }
  45. e = kmalloc(sizeof(*e), GFP_NOFS);
  46. if (!e)
  47. return -ENOMEM;
  48. e->next = *eie;
  49. e->inum = key->objectid;
  50. e->offset = key->offset + offset;
  51. *eie = e;
  52. return 0;
  53. }
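/* Free a whole chain of extent_inode_elem entries. */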
  54. static void free_inode_elem_list(struct extent_inode_elem *eie)
  55. {
  56. struct extent_inode_elem *eie_next;
  57. for (; eie; eie = eie_next) {
  58. eie_next = eie->next;
  59. kfree(eie);
  60. }
  61. }
  62. static int find_extent_in_eb(const struct extent_buffer *eb,
  63. u64 wanted_disk_byte, u64 extent_item_pos,
  64. struct extent_inode_elem **eie,
  65. bool ignore_offset)
  66. {
  67. u64 disk_byte;
  68. struct btrfs_key key;
  69. struct btrfs_file_extent_item *fi;
  70. int slot;
  71. int nritems;
  72. int extent_type;
  73. int ret;
  74. /*
  75. * From the shared data ref we only have the leaf but not the key.
  76. * Thus, we must look at all items and check whether one (or more) of
  77. * them references our extent item.
  78. */
  79. nritems = btrfs_header_nritems(eb);
  80. for (slot = 0; slot < nritems; ++slot) {
  81. btrfs_item_key_to_cpu(eb, &key, slot);
  82. if (key.type != BTRFS_EXTENT_DATA_KEY)
  83. continue;
  84. fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
  85. extent_type = btrfs_file_extent_type(eb, fi);
  86. if (extent_type == BTRFS_FILE_EXTENT_INLINE)
  87. continue;
  88. /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
  89. disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
  90. if (disk_byte != wanted_disk_byte)
  91. continue;
  92. ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
  93. if (ret < 0)
  94. return ret;
  95. }
  96. return 0;
  97. }
  98. struct preftree {
  99. struct rb_root_cached root;
  100. unsigned int count;
  101. };
  102. #define PREFTREE_INIT { .root = RB_ROOT_CACHED, .count = 0 }
  103. struct preftrees {
  104. struct preftree direct; /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
  105. struct preftree indirect; /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
  106. struct preftree indirect_missing_keys;
  107. };
  108. /*
  109. * Checks for a shared extent during backref search.
  110. *
  111. * The share_count tracks prelim_refs (direct and indirect) having a
  112. * ref->count >0:
  113. * - incremented when a ref->count transitions to >0
  114. * - decremented when a ref->count transitions to <1
  115. */
  116. struct share_check {
  117. u64 root_objectid;
  118. u64 inum;
  119. int share_count;
  120. };
  121. static inline int extent_is_shared(struct share_check *sc)
  122. {
  123. return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
  124. }
  125. static struct kmem_cache *btrfs_prelim_ref_cache;
  126. int __init btrfs_prelim_ref_init(void)
  127. {
  128. btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
  129. sizeof(struct prelim_ref),
  130. 0,
  131. SLAB_MEM_SPREAD,
  132. NULL);
  133. if (!btrfs_prelim_ref_cache)
  134. return -ENOMEM;
  135. return 0;
  136. }
  137. void __cold btrfs_prelim_ref_exit(void)
  138. {
  139. kmem_cache_destroy(btrfs_prelim_ref_cache);
  140. }
  141. static void free_pref(struct prelim_ref *ref)
  142. {
  143. kmem_cache_free(btrfs_prelim_ref_cache, ref);
  144. }
  145. /*
  146. * Return 0 when both refs are for the same block (and can be merged).
  147. * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
  148. * indicates a 'higher' block.
  149. */
  150. static int prelim_ref_compare(struct prelim_ref *ref1,
  151. struct prelim_ref *ref2)
  152. {
  153. if (ref1->level < ref2->level)
  154. return -1;
  155. if (ref1->level > ref2->level)
  156. return 1;
  157. if (ref1->root_id < ref2->root_id)
  158. return -1;
  159. if (ref1->root_id > ref2->root_id)
  160. return 1;
  161. if (ref1->key_for_search.type < ref2->key_for_search.type)
  162. return -1;
  163. if (ref1->key_for_search.type > ref2->key_for_search.type)
  164. return 1;
  165. if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
  166. return -1;
  167. if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
  168. return 1;
  169. if (ref1->key_for_search.offset < ref2->key_for_search.offset)
  170. return -1;
  171. if (ref1->key_for_search.offset > ref2->key_for_search.offset)
  172. return 1;
  173. if (ref1->parent < ref2->parent)
  174. return -1;
  175. if (ref1->parent > ref2->parent)
  176. return 1;
  177. return 0;
  178. }
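/*
 * Keep sc->share_count in sync when a ref's count changes from @oldcount to
 * @newcount: increment it when the count becomes positive and decrement it
 * when the count drops back below one. No-op when no share check is active.
 */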
  179. static void update_share_count(struct share_check *sc, int oldcount,
  180. int newcount)
  181. {
  182. if ((!sc) || (oldcount == 0 && newcount < 1))
  183. return;
  184. if (oldcount > 0 && newcount < 1)
  185. sc->share_count--;
  186. else if (oldcount < 1 && newcount > 0)
  187. sc->share_count++;
  188. }
  189. /*
  190. * Add @newref to the @root rbtree, merging identical refs.
  191. *
  192. * Callers should assume that newref has been freed after calling.
  193. */
  194. static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
  195. struct preftree *preftree,
  196. struct prelim_ref *newref,
  197. struct share_check *sc)
  198. {
  199. struct rb_root_cached *root;
  200. struct rb_node **p;
  201. struct rb_node *parent = NULL;
  202. struct prelim_ref *ref;
  203. int result;
  204. bool leftmost = true;
  205. root = &preftree->root;
  206. p = &root->rb_root.rb_node;
  207. while (*p) {
  208. parent = *p;
  209. ref = rb_entry(parent, struct prelim_ref, rbnode);
  210. result = prelim_ref_compare(ref, newref);
  211. if (result < 0) {
  212. p = &(*p)->rb_left;
  213. } else if (result > 0) {
  214. p = &(*p)->rb_right;
  215. leftmost = false;
  216. } else {
  217. /* Identical refs, merge them and free @newref */
  218. struct extent_inode_elem *eie = ref->inode_list;
  219. while (eie && eie->next)
  220. eie = eie->next;
  221. if (!eie)
  222. ref->inode_list = newref->inode_list;
  223. else
  224. eie->next = newref->inode_list;
  225. trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
  226. preftree->count);
  227. /*
  228. * A delayed ref can have newref->count < 0.
  229. * The ref->count is updated to follow any
  230. * BTRFS_[ADD|DROP]_DELAYED_REF actions.
  231. */
  232. update_share_count(sc, ref->count,
  233. ref->count + newref->count);
  234. ref->count += newref->count;
  235. free_pref(newref);
  236. return;
  237. }
  238. }
  239. update_share_count(sc, 0, newref->count);
  240. preftree->count++;
  241. trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
  242. rb_link_node(&newref->rbnode, parent, p);
  243. rb_insert_color_cached(&newref->rbnode, root, leftmost);
  244. }
  245. /*
  246. * Release the entire tree. We don't care about internal consistency so
  247. * just free everything and then reset the tree root.
  248. */
  249. static void prelim_release(struct preftree *preftree)
  250. {
  251. struct prelim_ref *ref, *next_ref;
  252. rbtree_postorder_for_each_entry_safe(ref, next_ref,
  253. &preftree->root.rb_root, rbnode)
  254. free_pref(ref);
  255. preftree->root = RB_ROOT_CACHED;
  256. preftree->count = 0;
  257. }
  258. /*
  259. * the rules for all callers of this function are:
  260. * - obtaining the parent is the goal
  261. * - if you add a key, you must know that it is a correct key
  262. * - if you cannot add the parent or a correct key, then we will look into the
  263. * block later to set a correct key
  264. *
  265. * delayed refs
  266. * ============
  267. * backref type | shared | indirect | shared | indirect
  268. * information | tree | tree | data | data
  269. * --------------------+--------+----------+--------+----------
  270. * parent logical | y | - | - | -
  271. * key to resolve | - | y | y | y
  272. * tree block logical | - | - | - | -
  273. * root for resolving | y | y | y | y
  274. *
  275. * - column 1: we've the parent -> done
  276. * - column 2, 3, 4: we use the key to find the parent
  277. *
  278. * on disk refs (inline or keyed)
  279. * ==============================
  280. * backref type | shared | indirect | shared | indirect
  281. * information | tree | tree | data | data
  282. * --------------------+--------+----------+--------+----------
  283. * parent logical | y | - | y | -
  284. * key to resolve | - | - | - | y
  285. * tree block logical | y | y | y | y
  286. * root for resolving | - | y | y | y
  287. *
  288. * - column 1, 3: we've the parent -> done
  289. * - column 2: we take the first key from the block to find the parent
  290. * (see add_missing_keys)
  291. * - column 4: we use the key to find the parent
  292. *
  293. * additional information that's available but not required to find the parent
  294. * block might help in merging entries to gain some speed.
  295. */
  296. static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
  297. struct preftree *preftree, u64 root_id,
  298. const struct btrfs_key *key, int level, u64 parent,
  299. u64 wanted_disk_byte, int count,
  300. struct share_check *sc, gfp_t gfp_mask)
  301. {
  302. struct prelim_ref *ref;
  303. if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
  304. return 0;
  305. ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
  306. if (!ref)
  307. return -ENOMEM;
  308. ref->root_id = root_id;
  309. if (key)
  310. ref->key_for_search = *key;
  311. else
  312. memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
  313. ref->inode_list = NULL;
  314. ref->level = level;
  315. ref->count = count;
  316. ref->parent = parent;
  317. ref->wanted_disk_byte = wanted_disk_byte;
  318. prelim_ref_insert(fs_info, preftree, ref, sc);
  319. return extent_is_shared(sc);
  320. }
  321. /* direct refs use root == 0, key == NULL */
  322. static int add_direct_ref(const struct btrfs_fs_info *fs_info,
  323. struct preftrees *preftrees, int level, u64 parent,
  324. u64 wanted_disk_byte, int count,
  325. struct share_check *sc, gfp_t gfp_mask)
  326. {
  327. return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
  328. parent, wanted_disk_byte, count, sc, gfp_mask);
  329. }
  330. /* indirect refs use parent == 0 */
  331. static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
  332. struct preftrees *preftrees, u64 root_id,
  333. const struct btrfs_key *key, int level,
  334. u64 wanted_disk_byte, int count,
  335. struct share_check *sc, gfp_t gfp_mask)
  336. {
  337. struct preftree *tree = &preftrees->indirect;
  338. if (!key)
  339. tree = &preftrees->indirect_missing_keys;
  340. return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
  341. wanted_disk_byte, count, sc, gfp_mask);
  342. }
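/*
 * Return 1 if the direct preftree already holds a shared data backref whose
 * parent is @bytenr, 0 otherwise. Used while resolving indirect refs to
 * detect leaves that are also referenced through a shared data backref.
 */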
  343. static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
  344. {
  345. struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
  346. struct rb_node *parent = NULL;
  347. struct prelim_ref *ref = NULL;
  348. struct prelim_ref target = {};
  349. int result;
  350. target.parent = bytenr;
  351. while (*p) {
  352. parent = *p;
  353. ref = rb_entry(parent, struct prelim_ref, rbnode);
  354. result = prelim_ref_compare(ref, &target);
  355. if (result < 0)
  356. p = &(*p)->rb_left;
  357. else if (result > 0)
  358. p = &(*p)->rb_right;
  359. else
  360. return 1;
  361. }
  362. return 0;
  363. }
  364. static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
  365. struct ulist *parents,
  366. struct preftrees *preftrees, struct prelim_ref *ref,
  367. int level, u64 time_seq, const u64 *extent_item_pos,
  368. bool ignore_offset)
  369. {
  370. int ret = 0;
  371. int slot;
  372. struct extent_buffer *eb;
  373. struct btrfs_key key;
  374. struct btrfs_key *key_for_search = &ref->key_for_search;
  375. struct btrfs_file_extent_item *fi;
  376. struct extent_inode_elem *eie = NULL, *old = NULL;
  377. u64 disk_byte;
  378. u64 wanted_disk_byte = ref->wanted_disk_byte;
  379. u64 count = 0;
  380. u64 data_offset;
  381. if (level != 0) {
  382. eb = path->nodes[level];
  383. ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
  384. if (ret < 0)
  385. return ret;
  386. return 0;
  387. }
  388. /*
  389. * 1. We normally enter this function with the path already pointing to
  390. * the first item to check. But sometimes, we may enter it with
  391. * slot == nritems.
  392. * 2. We are searching for a normal backref but the bytenr of this
  393. * leaf matches a shared data backref.
  394. * 3. The leaf owner is not equal to the root we are searching for.
  395. *
  396. * For these cases, go to the next leaf before we continue.
  397. */
  398. eb = path->nodes[0];
  399. if (path->slots[0] >= btrfs_header_nritems(eb) ||
  400. is_shared_data_backref(preftrees, eb->start) ||
  401. ref->root_id != btrfs_header_owner(eb)) {
  402. if (time_seq == SEQ_LAST)
  403. ret = btrfs_next_leaf(root, path);
  404. else
  405. ret = btrfs_next_old_leaf(root, path, time_seq);
  406. }
  407. while (!ret && count < ref->count) {
  408. eb = path->nodes[0];
  409. slot = path->slots[0];
  410. btrfs_item_key_to_cpu(eb, &key, slot);
  411. if (key.objectid != key_for_search->objectid ||
  412. key.type != BTRFS_EXTENT_DATA_KEY)
  413. break;
  414. /*
  415. * We are searching for a normal backref but the bytenr of this leaf
  416. * matches a shared data backref, OR
  417. * the leaf owner is not equal to the root we are searching for.
  418. */
  419. if (slot == 0 &&
  420. (is_shared_data_backref(preftrees, eb->start) ||
  421. ref->root_id != btrfs_header_owner(eb))) {
  422. if (time_seq == SEQ_LAST)
  423. ret = btrfs_next_leaf(root, path);
  424. else
  425. ret = btrfs_next_old_leaf(root, path, time_seq);
  426. continue;
  427. }
  428. fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
  429. disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
  430. data_offset = btrfs_file_extent_offset(eb, fi);
  431. if (disk_byte == wanted_disk_byte) {
  432. eie = NULL;
  433. old = NULL;
  434. if (ref->key_for_search.offset == key.offset - data_offset)
  435. count++;
  436. else
  437. goto next;
  438. if (extent_item_pos) {
  439. ret = check_extent_in_eb(&key, eb, fi,
  440. *extent_item_pos,
  441. &eie, ignore_offset);
  442. if (ret < 0)
  443. break;
  444. }
  445. if (ret > 0)
  446. goto next;
  447. ret = ulist_add_merge_ptr(parents, eb->start,
  448. eie, (void **)&old, GFP_NOFS);
  449. if (ret < 0)
  450. break;
  451. if (!ret && extent_item_pos) {
  452. while (old->next)
  453. old = old->next;
  454. old->next = eie;
  455. }
  456. eie = NULL;
  457. }
  458. next:
  459. if (time_seq == SEQ_LAST)
  460. ret = btrfs_next_item(root, path);
  461. else
  462. ret = btrfs_next_old_item(root, path, time_seq);
  463. }
  464. if (ret > 0)
  465. ret = 0;
  466. else if (ret < 0)
  467. free_inode_elem_list(eie);
  468. return ret;
  469. }
  470. /*
  471. * resolve an indirect backref in the form (root_id, key, level)
  472. * to a logical address
  473. */
  474. static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
  475. struct btrfs_path *path, u64 time_seq,
  476. struct preftrees *preftrees,
  477. struct prelim_ref *ref, struct ulist *parents,
  478. const u64 *extent_item_pos, bool ignore_offset)
  479. {
  480. struct btrfs_root *root;
  481. struct extent_buffer *eb;
  482. int ret = 0;
  483. int root_level;
  484. int level = ref->level;
  485. struct btrfs_key search_key = ref->key_for_search;
  486. /*
  487. * If we're search_commit_root we could possibly be holding locks on
  488. * other tree nodes. This happens when qgroups do backref walks while
  489. * adding new delayed refs. To deal with this we need to look in cache
  490. * for the root, and if we don't find it then we need to search the
  491. * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
  492. * here.
  493. */
  494. if (path->search_commit_root)
  495. root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
  496. else
  497. root = btrfs_get_fs_root(fs_info, ref->root_id, false);
  498. if (IS_ERR(root)) {
  499. ret = PTR_ERR(root);
  500. goto out_free;
  501. }
  502. if (!path->search_commit_root &&
  503. test_bit(BTRFS_ROOT_DELETING, &root->state)) {
  504. ret = -ENOENT;
  505. goto out;
  506. }
  507. if (btrfs_is_testing(fs_info)) {
  508. ret = -ENOENT;
  509. goto out;
  510. }
  511. if (path->search_commit_root)
  512. root_level = btrfs_header_level(root->commit_root);
  513. else if (time_seq == SEQ_LAST)
  514. root_level = btrfs_header_level(root->node);
  515. else
  516. root_level = btrfs_old_root_level(root, time_seq);
  517. if (root_level + 1 == level)
  518. goto out;
  519. /*
  520. * We can often find data backrefs with an offset that is too large
  521. * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
  522. * subtracting a file's offset with the data offset of its
  523. * corresponding extent data item. This can happen for example in the
  524. * clone ioctl.
  525. *
  526. * So if we detect such a case we set the search key's offset to zero to
  527. * make sure we will find the matching file extent item at
  528. * add_all_parents(), otherwise we will miss it because the offset
  529. * taken from the backref is much larger than the offset of the file
  530. * extent item. This can make us scan a very large number of file
  531. * extent items, but at least it will not make us miss any.
  532. *
  533. * This is an ugly workaround for a behaviour that should have never
  534. * existed, but it does and a fix for the clone ioctl would touch a lot
  535. * of places, cause backwards incompatibility and would not fix the
  536. * problem for extents cloned with older kernels.
  537. */
  538. if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
  539. search_key.offset >= LLONG_MAX)
  540. search_key.offset = 0;
  541. path->lowest_level = level;
  542. if (time_seq == SEQ_LAST)
  543. ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
  544. else
  545. ret = btrfs_search_old_slot(root, &search_key, path, time_seq);
  546. btrfs_debug(fs_info,
  547. "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
  548. ref->root_id, level, ref->count, ret,
  549. ref->key_for_search.objectid, ref->key_for_search.type,
  550. ref->key_for_search.offset);
  551. if (ret < 0)
  552. goto out;
  553. eb = path->nodes[level];
  554. while (!eb) {
  555. if (WARN_ON(!level)) {
  556. ret = 1;
  557. goto out;
  558. }
  559. level--;
  560. eb = path->nodes[level];
  561. }
  562. ret = add_all_parents(root, path, parents, preftrees, ref, level,
  563. time_seq, extent_item_pos, ignore_offset);
  564. out:
  565. btrfs_put_root(root);
  566. out_free:
  567. path->lowest_level = 0;
  568. btrfs_release_path(path);
  569. return ret;
  570. }
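/* Translate a ulist node's aux value back into its extent_inode_elem list. */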
  571. static struct extent_inode_elem *
  572. unode_aux_to_inode_list(struct ulist_node *node)
  573. {
  574. if (!node)
  575. return NULL;
  576. return (struct extent_inode_elem *)(uintptr_t)node->aux;
  577. }
  578. /*
  579. * We maintain three separate rbtrees: one for direct refs, one for
  580. * indirect refs which have a key, and one for indirect refs which do not
  581. * have a key. Each tree does merge on insertion.
  582. *
  583. * Once all of the references are located, we iterate over the tree of
  584. * indirect refs with missing keys. An appropriate key is located and
  585. * the ref is moved onto the tree for indirect refs. After all missing
  586. * keys are thus located, we iterate over the indirect ref tree, resolve
  587. * each reference, and then insert the resolved reference onto the
  588. * direct tree (merging there too).
  589. *
  590. * New backrefs (i.e., for parent nodes) are added to the appropriate
  591. * rbtree as they are encountered. The new backrefs are subsequently
  592. * resolved as above.
  593. */
  594. static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
  595. struct btrfs_path *path, u64 time_seq,
  596. struct preftrees *preftrees,
  597. const u64 *extent_item_pos,
  598. struct share_check *sc, bool ignore_offset)
  599. {
  600. int err;
  601. int ret = 0;
  602. struct ulist *parents;
  603. struct ulist_node *node;
  604. struct ulist_iterator uiter;
  605. struct rb_node *rnode;
  606. parents = ulist_alloc(GFP_NOFS);
  607. if (!parents)
  608. return -ENOMEM;
  609. /*
  610. * We could trade memory usage for performance here by iterating
  611. * the tree, allocating new refs for each insertion, and then
  612. * freeing the entire indirect tree when we're done. In some test
  613. * cases, the tree can grow quite large (~200k objects).
  614. */
  615. while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
  616. struct prelim_ref *ref;
  617. ref = rb_entry(rnode, struct prelim_ref, rbnode);
  618. if (WARN(ref->parent,
  619. "BUG: direct ref found in indirect tree")) {
  620. ret = -EINVAL;
  621. goto out;
  622. }
  623. rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
  624. preftrees->indirect.count--;
  625. if (ref->count == 0) {
  626. free_pref(ref);
  627. continue;
  628. }
  629. if (sc && sc->root_objectid &&
  630. ref->root_id != sc->root_objectid) {
  631. free_pref(ref);
  632. ret = BACKREF_FOUND_SHARED;
  633. goto out;
  634. }
  635. err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
  636. ref, parents, extent_item_pos,
  637. ignore_offset);
  638. /*
  639. * We can only tolerate -ENOENT; otherwise we should catch the error
  640. * and return directly.
  641. */
  642. if (err == -ENOENT) {
  643. prelim_ref_insert(fs_info, &preftrees->direct, ref,
  644. NULL);
  645. continue;
  646. } else if (err) {
  647. free_pref(ref);
  648. ret = err;
  649. goto out;
  650. }
  651. /* we put the first parent into the ref at hand */
  652. ULIST_ITER_INIT(&uiter);
  653. node = ulist_next(parents, &uiter);
  654. ref->parent = node ? node->val : 0;
  655. ref->inode_list = unode_aux_to_inode_list(node);
  656. /* Add a prelim_ref(s) for any other parent(s). */
  657. while ((node = ulist_next(parents, &uiter))) {
  658. struct prelim_ref *new_ref;
  659. new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
  660. GFP_NOFS);
  661. if (!new_ref) {
  662. free_pref(ref);
  663. ret = -ENOMEM;
  664. goto out;
  665. }
  666. memcpy(new_ref, ref, sizeof(*ref));
  667. new_ref->parent = node->val;
  668. new_ref->inode_list = unode_aux_to_inode_list(node);
  669. prelim_ref_insert(fs_info, &preftrees->direct,
  670. new_ref, NULL);
  671. }
  672. /*
  673. * Now it's a direct ref, put it in the direct tree. We must
  674. * do this last because the ref could be merged/freed here.
  675. */
  676. prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);
  677. ulist_reinit(parents);
  678. cond_resched();
  679. }
  680. out:
  681. ulist_free(parents);
  682. return ret;
  683. }
  684. /*
  685. * read tree blocks and add keys where required.
  686. */
  687. static int add_missing_keys(struct btrfs_fs_info *fs_info,
  688. struct preftrees *preftrees, bool lock)
  689. {
  690. struct prelim_ref *ref;
  691. struct extent_buffer *eb;
  692. struct preftree *tree = &preftrees->indirect_missing_keys;
  693. struct rb_node *node;
  694. while ((node = rb_first_cached(&tree->root))) {
  695. ref = rb_entry(node, struct prelim_ref, rbnode);
  696. rb_erase_cached(node, &tree->root);
  697. BUG_ON(ref->parent); /* should not be a direct ref */
  698. BUG_ON(ref->key_for_search.type);
  699. BUG_ON(!ref->wanted_disk_byte);
  700. eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
  701. ref->level - 1, NULL);
  702. if (IS_ERR(eb)) {
  703. free_pref(ref);
  704. return PTR_ERR(eb);
  705. } else if (!extent_buffer_uptodate(eb)) {
  706. free_pref(ref);
  707. free_extent_buffer(eb);
  708. return -EIO;
  709. }
  710. if (lock)
  711. btrfs_tree_read_lock(eb);
  712. if (btrfs_header_level(eb) == 0)
  713. btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
  714. else
  715. btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
  716. if (lock)
  717. btrfs_tree_read_unlock(eb);
  718. free_extent_buffer(eb);
  719. prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
  720. cond_resched();
  721. }
  722. return 0;
  723. }
  724. /*
  725. * Add all currently queued delayed refs from this head whose seq nr is
  726. * smaller than or equal to @seq to the preftrees.
  727. */
  728. static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
  729. struct btrfs_delayed_ref_head *head, u64 seq,
  730. struct preftrees *preftrees, struct share_check *sc)
  731. {
  732. struct btrfs_delayed_ref_node *node;
  733. struct btrfs_delayed_extent_op *extent_op = head->extent_op;
  734. struct btrfs_key key;
  735. struct btrfs_key tmp_op_key;
  736. struct rb_node *n;
  737. int count;
  738. int ret = 0;
  739. if (extent_op && extent_op->update_key)
  740. btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
  741. spin_lock(&head->lock);
  742. for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
  743. node = rb_entry(n, struct btrfs_delayed_ref_node,
  744. ref_node);
  745. if (node->seq > seq)
  746. continue;
  747. switch (node->action) {
  748. case BTRFS_ADD_DELAYED_EXTENT:
  749. case BTRFS_UPDATE_DELAYED_HEAD:
  750. WARN_ON(1);
  751. continue;
  752. case BTRFS_ADD_DELAYED_REF:
  753. count = node->ref_mod;
  754. break;
  755. case BTRFS_DROP_DELAYED_REF:
  756. count = node->ref_mod * -1;
  757. break;
  758. default:
  759. BUG();
  760. }
  761. switch (node->type) {
  762. case BTRFS_TREE_BLOCK_REF_KEY: {
  763. /* NORMAL INDIRECT METADATA backref */
  764. struct btrfs_delayed_tree_ref *ref;
  765. ref = btrfs_delayed_node_to_tree_ref(node);
  766. ret = add_indirect_ref(fs_info, preftrees, ref->root,
  767. &tmp_op_key, ref->level + 1,
  768. node->bytenr, count, sc,
  769. GFP_ATOMIC);
  770. break;
  771. }
  772. case BTRFS_SHARED_BLOCK_REF_KEY: {
  773. /* SHARED DIRECT METADATA backref */
  774. struct btrfs_delayed_tree_ref *ref;
  775. ref = btrfs_delayed_node_to_tree_ref(node);
  776. ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
  777. ref->parent, node->bytenr, count,
  778. sc, GFP_ATOMIC);
  779. break;
  780. }
  781. case BTRFS_EXTENT_DATA_REF_KEY: {
  782. /* NORMAL INDIRECT DATA backref */
  783. struct btrfs_delayed_data_ref *ref;
  784. ref = btrfs_delayed_node_to_data_ref(node);
  785. key.objectid = ref->objectid;
  786. key.type = BTRFS_EXTENT_DATA_KEY;
  787. key.offset = ref->offset;
  788. /*
  789. * Found an inum that doesn't match our known inum, we
  790. * know it's shared.
  791. */
  792. if (sc && sc->inum && ref->objectid != sc->inum) {
  793. ret = BACKREF_FOUND_SHARED;
  794. goto out;
  795. }
  796. ret = add_indirect_ref(fs_info, preftrees, ref->root,
  797. &key, 0, node->bytenr, count, sc,
  798. GFP_ATOMIC);
  799. break;
  800. }
  801. case BTRFS_SHARED_DATA_REF_KEY: {
  802. /* SHARED DIRECT FULL backref */
  803. struct btrfs_delayed_data_ref *ref;
  804. ref = btrfs_delayed_node_to_data_ref(node);
  805. ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
  806. node->bytenr, count, sc,
  807. GFP_ATOMIC);
  808. break;
  809. }
  810. default:
  811. WARN_ON(1);
  812. }
  813. /*
  814. * We must ignore BACKREF_FOUND_SHARED until all delayed
  815. * refs have been checked.
  816. */
  817. if (ret && (ret != BACKREF_FOUND_SHARED))
  818. break;
  819. }
  820. if (!ret)
  821. ret = extent_is_shared(sc);
  822. out:
  823. spin_unlock(&head->lock);
  824. return ret;
  825. }
  826. /*
  827. * add all inline backrefs for bytenr to the list
  828. *
  829. * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
  830. */
  831. static int add_inline_refs(const struct btrfs_fs_info *fs_info,
  832. struct btrfs_path *path, u64 bytenr,
  833. int *info_level, struct preftrees *preftrees,
  834. struct share_check *sc)
  835. {
  836. int ret = 0;
  837. int slot;
  838. struct extent_buffer *leaf;
  839. struct btrfs_key key;
  840. struct btrfs_key found_key;
  841. unsigned long ptr;
  842. unsigned long end;
  843. struct btrfs_extent_item *ei;
  844. u64 flags;
  845. u64 item_size;
  846. /*
  847. * enumerate all inline refs
  848. */
  849. leaf = path->nodes[0];
  850. slot = path->slots[0];
  851. item_size = btrfs_item_size_nr(leaf, slot);
  852. BUG_ON(item_size < sizeof(*ei));
  853. ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
  854. flags = btrfs_extent_flags(leaf, ei);
  855. btrfs_item_key_to_cpu(leaf, &found_key, slot);
  856. ptr = (unsigned long)(ei + 1);
  857. end = (unsigned long)ei + item_size;
  858. if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
  859. flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
  860. struct btrfs_tree_block_info *info;
  861. info = (struct btrfs_tree_block_info *)ptr;
  862. *info_level = btrfs_tree_block_level(leaf, info);
  863. ptr += sizeof(struct btrfs_tree_block_info);
  864. BUG_ON(ptr > end);
  865. } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
  866. *info_level = found_key.offset;
  867. } else {
  868. BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
  869. }
  870. while (ptr < end) {
  871. struct btrfs_extent_inline_ref *iref;
  872. u64 offset;
  873. int type;
  874. iref = (struct btrfs_extent_inline_ref *)ptr;
  875. type = btrfs_get_extent_inline_ref_type(leaf, iref,
  876. BTRFS_REF_TYPE_ANY);
  877. if (type == BTRFS_REF_TYPE_INVALID)
  878. return -EUCLEAN;
  879. offset = btrfs_extent_inline_ref_offset(leaf, iref);
  880. switch (type) {
  881. case BTRFS_SHARED_BLOCK_REF_KEY:
  882. ret = add_direct_ref(fs_info, preftrees,
  883. *info_level + 1, offset,
  884. bytenr, 1, NULL, GFP_NOFS);
  885. break;
  886. case BTRFS_SHARED_DATA_REF_KEY: {
  887. struct btrfs_shared_data_ref *sdref;
  888. int count;
  889. sdref = (struct btrfs_shared_data_ref *)(iref + 1);
  890. count = btrfs_shared_data_ref_count(leaf, sdref);
  891. ret = add_direct_ref(fs_info, preftrees, 0, offset,
  892. bytenr, count, sc, GFP_NOFS);
  893. break;
  894. }
  895. case BTRFS_TREE_BLOCK_REF_KEY:
  896. ret = add_indirect_ref(fs_info, preftrees, offset,
  897. NULL, *info_level + 1,
  898. bytenr, 1, NULL, GFP_NOFS);
  899. break;
  900. case BTRFS_EXTENT_DATA_REF_KEY: {
  901. struct btrfs_extent_data_ref *dref;
  902. int count;
  903. u64 root;
  904. dref = (struct btrfs_extent_data_ref *)(&iref->offset);
  905. count = btrfs_extent_data_ref_count(leaf, dref);
  906. key.objectid = btrfs_extent_data_ref_objectid(leaf,
  907. dref);
  908. key.type = BTRFS_EXTENT_DATA_KEY;
  909. key.offset = btrfs_extent_data_ref_offset(leaf, dref);
  910. if (sc && sc->inum && key.objectid != sc->inum) {
  911. ret = BACKREF_FOUND_SHARED;
  912. break;
  913. }
  914. root = btrfs_extent_data_ref_root(leaf, dref);
  915. ret = add_indirect_ref(fs_info, preftrees, root,
  916. &key, 0, bytenr, count,
  917. sc, GFP_NOFS);
  918. break;
  919. }
  920. default:
  921. WARN_ON(1);
  922. }
  923. if (ret)
  924. return ret;
  925. ptr += btrfs_extent_inline_ref_size(type);
  926. }
  927. return 0;
  928. }
  929. /*
  930. * add all non-inline backrefs for bytenr to the list
  931. *
  932. * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
  933. */
  934. static int add_keyed_refs(struct btrfs_fs_info *fs_info,
  935. struct btrfs_path *path, u64 bytenr,
  936. int info_level, struct preftrees *preftrees,
  937. struct share_check *sc)
  938. {
  939. struct btrfs_root *extent_root = fs_info->extent_root;
  940. int ret;
  941. int slot;
  942. struct extent_buffer *leaf;
  943. struct btrfs_key key;
  944. while (1) {
  945. ret = btrfs_next_item(extent_root, path);
  946. if (ret < 0)
  947. break;
  948. if (ret) {
  949. ret = 0;
  950. break;
  951. }
  952. slot = path->slots[0];
  953. leaf = path->nodes[0];
  954. btrfs_item_key_to_cpu(leaf, &key, slot);
  955. if (key.objectid != bytenr)
  956. break;
  957. if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
  958. continue;
  959. if (key.type > BTRFS_SHARED_DATA_REF_KEY)
  960. break;
  961. switch (key.type) {
  962. case BTRFS_SHARED_BLOCK_REF_KEY:
  963. /* SHARED DIRECT METADATA backref */
  964. ret = add_direct_ref(fs_info, preftrees,
  965. info_level + 1, key.offset,
  966. bytenr, 1, NULL, GFP_NOFS);
  967. break;
  968. case BTRFS_SHARED_DATA_REF_KEY: {
  969. /* SHARED DIRECT FULL backref */
  970. struct btrfs_shared_data_ref *sdref;
  971. int count;
  972. sdref = btrfs_item_ptr(leaf, slot,
  973. struct btrfs_shared_data_ref);
  974. count = btrfs_shared_data_ref_count(leaf, sdref);
  975. ret = add_direct_ref(fs_info, preftrees, 0,
  976. key.offset, bytenr, count,
  977. sc, GFP_NOFS);
  978. break;
  979. }
  980. case BTRFS_TREE_BLOCK_REF_KEY:
  981. /* NORMAL INDIRECT METADATA backref */
  982. ret = add_indirect_ref(fs_info, preftrees, key.offset,
  983. NULL, info_level + 1, bytenr,
  984. 1, NULL, GFP_NOFS);
  985. break;
  986. case BTRFS_EXTENT_DATA_REF_KEY: {
  987. /* NORMAL INDIRECT DATA backref */
  988. struct btrfs_extent_data_ref *dref;
  989. int count;
  990. u64 root;
  991. dref = btrfs_item_ptr(leaf, slot,
  992. struct btrfs_extent_data_ref);
  993. count = btrfs_extent_data_ref_count(leaf, dref);
  994. key.objectid = btrfs_extent_data_ref_objectid(leaf,
  995. dref);
  996. key.type = BTRFS_EXTENT_DATA_KEY;
  997. key.offset = btrfs_extent_data_ref_offset(leaf, dref);
  998. if (sc && sc->inum && key.objectid != sc->inum) {
  999. ret = BACKREF_FOUND_SHARED;
  1000. break;
  1001. }
  1002. root = btrfs_extent_data_ref_root(leaf, dref);
  1003. ret = add_indirect_ref(fs_info, preftrees, root,
  1004. &key, 0, bytenr, count,
  1005. sc, GFP_NOFS);
  1006. break;
  1007. }
  1008. default:
  1009. WARN_ON(1);
  1010. }
  1011. if (ret)
  1012. return ret;
  1013. }
  1014. return ret;
  1015. }
  1016. /*
  1017. * This adds all existing backrefs (inline backrefs, keyed backrefs and
  1018. * delayed refs) for the given bytenr to the refs list, merges duplicates
  1019. * and resolves indirect refs to their parent bytenr.
  1020. * When roots are found, they're added to the roots list.
  1021. *
  1022. * If time_seq is set to SEQ_LAST, it will not search delayed_refs and will
  1023. * behave much like the trans == NULL case; the only difference is that it
  1024. * will not search the commit root.
  1025. * The special case is for qgroup to search roots in commit_transaction().
  1026. *
  1027. * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
  1028. * shared extent is detected.
  1029. *
  1030. * Otherwise this returns 0 for success and <0 for an error.
  1031. *
  1032. * If ignore_offset is set to false, only extent refs whose offsets match
  1033. * extent_item_pos are returned. If true, every extent ref is returned
  1034. * and extent_item_pos is ignored.
  1035. *
  1036. * FIXME some caching might speed things up
  1037. */
  1038. static int find_parent_nodes(struct btrfs_trans_handle *trans,
  1039. struct btrfs_fs_info *fs_info, u64 bytenr,
  1040. u64 time_seq, struct ulist *refs,
  1041. struct ulist *roots, const u64 *extent_item_pos,
  1042. struct share_check *sc, bool ignore_offset)
  1043. {
  1044. struct btrfs_key key;
  1045. struct btrfs_path *path;
  1046. struct btrfs_delayed_ref_root *delayed_refs = NULL;
  1047. struct btrfs_delayed_ref_head *head;
  1048. int info_level = 0;
  1049. int ret;
  1050. struct prelim_ref *ref;
  1051. struct rb_node *node;
  1052. struct extent_inode_elem *eie = NULL;
  1053. struct preftrees preftrees = {
  1054. .direct = PREFTREE_INIT,
  1055. .indirect = PREFTREE_INIT,
  1056. .indirect_missing_keys = PREFTREE_INIT
  1057. };
  1058. key.objectid = bytenr;
  1059. key.offset = (u64)-1;
  1060. if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
  1061. key.type = BTRFS_METADATA_ITEM_KEY;
  1062. else
  1063. key.type = BTRFS_EXTENT_ITEM_KEY;
  1064. path = btrfs_alloc_path();
  1065. if (!path)
  1066. return -ENOMEM;
  1067. if (!trans) {
  1068. path->search_commit_root = 1;
  1069. path->skip_locking = 1;
  1070. }
  1071. if (time_seq == SEQ_LAST)
  1072. path->skip_locking = 1;
  1073. /*
  1074. * grab both a lock on the path and a lock on the delayed ref head.
  1075. * We need both to get a consistent picture of how the refs look
  1076. * at a specified point in time
  1077. */
  1078. again:
  1079. head = NULL;
  1080. ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
  1081. if (ret < 0)
  1082. goto out;
  1083. if (ret == 0) {
  1084. /* This shouldn't happen, indicates a bug or fs corruption. */
  1085. ASSERT(ret != 0);
  1086. ret = -EUCLEAN;
  1087. goto out;
  1088. }
  1089. #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
  1090. if (trans && likely(trans->type != __TRANS_DUMMY) &&
  1091. time_seq != SEQ_LAST) {
  1092. #else
  1093. if (trans && time_seq != SEQ_LAST) {
  1094. #endif
  1095. /*
  1096. * Check whether there are updates queued for this ref and lock the
  1097. * head.
  1098. */
  1099. delayed_refs = &trans->transaction->delayed_refs;
  1100. spin_lock(&delayed_refs->lock);
  1101. head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
  1102. if (head) {
  1103. if (!mutex_trylock(&head->mutex)) {
  1104. refcount_inc(&head->refs);
  1105. spin_unlock(&delayed_refs->lock);
  1106. btrfs_release_path(path);
  1107. /*
  1108. * Mutex was contended, block until it's
  1109. * released and try again
  1110. */
  1111. mutex_lock(&head->mutex);
  1112. mutex_unlock(&head->mutex);
  1113. btrfs_put_delayed_ref_head(head);
  1114. goto again;
  1115. }
  1116. spin_unlock(&delayed_refs->lock);
  1117. ret = add_delayed_refs(fs_info, head, time_seq,
  1118. &preftrees, sc);
  1119. mutex_unlock(&head->mutex);
  1120. if (ret)
  1121. goto out;
  1122. } else {
  1123. spin_unlock(&delayed_refs->lock);
  1124. }
  1125. }
  1126. if (path->slots[0]) {
  1127. struct extent_buffer *leaf;
  1128. int slot;
  1129. path->slots[0]--;
  1130. leaf = path->nodes[0];
  1131. slot = path->slots[0];
  1132. btrfs_item_key_to_cpu(leaf, &key, slot);
  1133. if (key.objectid == bytenr &&
  1134. (key.type == BTRFS_EXTENT_ITEM_KEY ||
  1135. key.type == BTRFS_METADATA_ITEM_KEY)) {
  1136. ret = add_inline_refs(fs_info, path, bytenr,
  1137. &info_level, &preftrees, sc);
  1138. if (ret)
  1139. goto out;
  1140. ret = add_keyed_refs(fs_info, path, bytenr, info_level,
  1141. &preftrees, sc);
  1142. if (ret)
  1143. goto out;
  1144. }
  1145. }
  1146. btrfs_release_path(path);
  1147. ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
  1148. if (ret)
  1149. goto out;
  1150. WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
  1151. ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
  1152. extent_item_pos, sc, ignore_offset);
  1153. if (ret)
  1154. goto out;
  1155. WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
  1156. /*
  1157. * This walks the tree of merged and resolved refs. Tree blocks are
  1158. * read in as needed. Unique entries are added to the ulist, and
  1159. * the list of found roots is updated.
  1160. *
  1161. * We release the entire tree in one go before returning.
  1162. */
  1163. node = rb_first_cached(&preftrees.direct.root);
  1164. while (node) {
  1165. ref = rb_entry(node, struct prelim_ref, rbnode);
  1166. node = rb_next(&ref->rbnode);
  1167. /*
  1168. * ref->count < 0 can happen here if there are delayed
  1169. * refs with a node->action of BTRFS_DROP_DELAYED_REF.
  1170. * prelim_ref_insert() relies on this when merging
  1171. * identical refs to keep the overall count correct.
  1172. * prelim_ref_insert() will merge only those refs
  1173. * which compare identically. Any refs having
  1174. * e.g. different offsets would not be merged,
  1175. * and would retain their original ref->count < 0.
  1176. */
  1177. if (roots && ref->count && ref->root_id && ref->parent == 0) {
  1178. if (sc && sc->root_objectid &&
  1179. ref->root_id != sc->root_objectid) {
  1180. ret = BACKREF_FOUND_SHARED;
  1181. goto out;
  1182. }
  1183. /* no parent == root of tree */
  1184. ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
  1185. if (ret < 0)
  1186. goto out;
  1187. }
  1188. if (ref->count && ref->parent) {
  1189. if (extent_item_pos && !ref->inode_list &&
  1190. ref->level == 0) {
  1191. struct extent_buffer *eb;
  1192. eb = read_tree_block(fs_info, ref->parent, 0,
  1193. ref->level, NULL);
  1194. if (IS_ERR(eb)) {
  1195. ret = PTR_ERR(eb);
  1196. goto out;
  1197. } else if (!extent_buffer_uptodate(eb)) {
  1198. free_extent_buffer(eb);
  1199. ret = -EIO;
  1200. goto out;
  1201. }
  1202. if (!path->skip_locking) {
  1203. btrfs_tree_read_lock(eb);
  1204. btrfs_set_lock_blocking_read(eb);
  1205. }
  1206. ret = find_extent_in_eb(eb, bytenr,
  1207. *extent_item_pos, &eie, ignore_offset);
  1208. if (!path->skip_locking)
  1209. btrfs_tree_read_unlock_blocking(eb);
  1210. free_extent_buffer(eb);
  1211. if (ret < 0)
  1212. goto out;
  1213. ref->inode_list = eie;
  1214. }
  1215. ret = ulist_add_merge_ptr(refs, ref->parent,
  1216. ref->inode_list,
  1217. (void **)&eie, GFP_NOFS);
  1218. if (ret < 0)
  1219. goto out;
  1220. if (!ret && extent_item_pos) {
  1221. /*
  1222. * We've recorded that parent, so we must extend
  1223. * its inode list here.
  1224. *
  1225. * However if there was corruption we may not
  1226. * have found an eie, return an error in this
  1227. * case.
  1228. */
  1229. ASSERT(eie);
  1230. if (!eie) {
  1231. ret = -EUCLEAN;
  1232. goto out;
  1233. }
  1234. while (eie->next)
  1235. eie = eie->next;
  1236. eie->next = ref->inode_list;
  1237. }
  1238. eie = NULL;
  1239. }
  1240. cond_resched();
  1241. }
  1242. out:
  1243. btrfs_free_path(path);
  1244. prelim_release(&preftrees.direct);
  1245. prelim_release(&preftrees.indirect);
  1246. prelim_release(&preftrees.indirect_missing_keys);
  1247. if (ret < 0)
  1248. free_inode_elem_list(eie);
  1249. return ret;
  1250. }
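/*
 * Free a ulist of leaf blocks, including the extent_inode_elem lists hanging
 * off each node's aux field, then free the ulist itself.
 */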
  1251. static void free_leaf_list(struct ulist *blocks)
  1252. {
  1253. struct ulist_node *node = NULL;
  1254. struct extent_inode_elem *eie;
  1255. struct ulist_iterator uiter;
  1256. ULIST_ITER_INIT(&uiter);
  1257. while ((node = ulist_next(blocks, &uiter))) {
  1258. if (!node->aux)
  1259. continue;
  1260. eie = unode_aux_to_inode_list(node);
  1261. free_inode_elem_list(eie);
  1262. node->aux = 0;
  1263. }
  1264. ulist_free(blocks);
  1265. }
  1266. /*
  1267. * Finds all leafs with a reference to the specified combination of bytenr
  1268. * and offset.
  1269. * The leafs will be stored in the *leafs ulist, which must be freed with
  1270. * ulist_free.
  1271. *
  1272. * returns 0 on success, <0 on error
  1273. */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			*roots = NULL;
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}
/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		       struct ulist *roots, struct ulist *tmp)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
	};

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(roots);
	ulist_release(tmp);
	return ret;
}
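
/*
 * Example usage (illustrative sketch): a fiemap-style caller is expected to
 * allocate the two scratch ulists once and reuse them across calls, roughly
 * as below. Error handling is elided and the surrounding variables (root,
 * inode, bytenr, flags) are assumed from the caller's context:
 *
 *	struct ulist *roots = ulist_alloc(GFP_KERNEL);
 *	struct ulist *tmp_ulist = ulist_alloc(GFP_KERNEL);
 *	int shared;
 *
 *	shared = btrfs_check_shared(root, btrfs_ino(inode), bytenr,
 *				    roots, tmp_ulist);
 *	if (shared > 0)
 *		flags |= FIEMAP_EXTENT_SHARED;
 *
 *	ulist_free(roots);
 *	ulist_free(tmp_ulist);
 */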
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_EXTREF item. In the case that
			 * we're pointing to the last slot in a leaf,
			 * we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}
/*
 * This iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. The path is only given within the current file system.
 * Therefore, it never starts with a '/'. The caller is responsible for
 * providing "size" bytes in "dest". The dest buffer will be filled backwards.
 * Finally, the start point of the resulting string is returned. This pointer
 * is normally within dest.
 * In case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. That way, the caller can determine how much space would be
 * required for the path to fit into the buffer. In that case, the returned
 * value will be smaller than dest. Callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				      BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_read(eb);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
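
/*
 * Example usage (illustrative sketch): per the comment above, a caller must
 * compare the returned pointer against the start of its buffer to detect
 * truncation; inode_to_path() further below does exactly this. Assuming a
 * caller-provided buffer "dest" of "size" bytes:
 *
 *	char *p = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				    eb, parent, dest, size);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	if (p < dest)
 *		; // path did not fit; (dest - p) more bytes would be needed
 *	else
 *		; // p points at the 0-terminated path inside dest
 */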
/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			    "logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		    "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		    logical, logical - found_key->objectid, found_key->objectid,
		    found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG();
		return 0;
	}

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EUCLEAN;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
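
/*
 * Example usage (illustrative sketch): callers drive this helper in a loop,
 * with ptr starting at 0, until it reports that the last tree backref has
 * been consumed. Variables other than the loop state (eb, found_key, ei,
 * item_size) are assumed from the caller's context:
 *
 *	unsigned long ptr = 0;
 *	u64 ref_root;
 *	u8 ref_level;
 *	int ret;
 *
 *	while (1) {
 *		ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
 *					      item_size, &ref_root, &ref_level);
 *		if (ret)
 *			break;	// <0 is an error, 1 means no more backrefs
 *		// one (ref_root, ref_level) pair resolved; use it here
 *	}
 */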
static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx,
			  bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
		    extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->extent_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	else
		down_read(&fs_info->commit_root_sem);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}
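
/*
 * Example usage (illustrative sketch): a caller provides an
 * iterate_extent_inodes_t callback matching the iterate(eie->inum,
 * eie->offset, root, ctx) call made in iterate_leaf_refs() above. The
 * callback and counter below are hypothetical names:
 *
 *	static int count_inodes(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		u64 *count = ctx;
 *
 *		(*count)++;
 *		return 0;	// non-zero would stop the iteration
 *	}
 *
 *	u64 count = 0;
 *	// last three args: callback, its ctx, ignore_offset
 *	ret = iterate_extent_inodes(fs_info, extent_item_objectid,
 *				    extent_item_pos, 0, count_inodes, &count,
 *				    false);
 */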
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, search_commit_root,
				    iterate, ctx, ignore_offset);

	return ret;
}

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				      parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				      &found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				    "following ref at offset %u for inode %llu in tree %llu",
				    cur, found_key.objectid,
				    fs_root->root_key.objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}
/*
 * returns 0 if the path could be dumped (possibly truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}
/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}
/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return ERR_CAST(fspath);

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}
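
/*
 * Example usage (illustrative sketch): ioctl-style callers put the pieces
 * above together roughly as below. The buffer size, fs_root and inum are
 * assumed from the caller's context:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath;
 *	int i, ret;
 *
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath)) {
 *		btrfs_free_path(path);
 *		return PTR_ERR(ipath);
 *	}
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; !ret && i < ipath->fspath->elem_cnt; i++)
 *		pr_info("path: %s\n",
 *			(char *)(unsigned long)ipath->fspath->val[i]);
 *	free_ipath(ipath);
 *	btrfs_free_path(path);
 */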
struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
{
	struct btrfs_backref_iter *ret;

	ret = kzalloc(sizeof(*ret), gfp_flag);
	if (!ret)
		return NULL;

	ret->path = btrfs_alloc_path();
	if (!ret->path) {
		kfree(ret);
		return NULL;
	}

	/* Current backref iterator only supports iteration in commit root */
	ret->path->search_commit_root = 1;
	ret->path->skip_locking = 1;
	ret->fs_info = fs_info;

	return ret;
}

int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size_nr(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only support iteration on tree backref yet.
	 *
	 * This is an extra precaution for non skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks, that we can only use
	 * extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(fs_info->extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				      path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
				      path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}
/*
 * Go to the next backref item of current bytenr, can be either inlined or
 * keyed.
 *
 * Caller needs to check whether it's inline ref or not by iter->cur_key.
 *
 * Return 0 if we get next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items, there is no inline item, go to the next one */
	ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
								 path->slots[0]);
	return 0;
}
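
/*
 * Example usage (illustrative sketch): the intended pattern is a
 * start/next/free loop per bytenr. btrfs_backref_iter_free() is assumed to
 * be the matching release helper from backref.h:
 *
 *	struct btrfs_backref_iter *iter;
 *	int ret;
 *
 *	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
 *	if (!iter)
 *		return -ENOMEM;
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	while (ret == 0) {
 *		// inspect iter->cur_key (and iter->cur_ptr for inline refs)
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *	btrfs_backref_iter_free(iter);
 *	return ret < 0 ? ret : 0;
 */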
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	INIT_LIST_HEAD(&cache->pending_edge);
	INIT_LIST_HEAD(&cache->useless_node);
	cache->fs_info = fs_info;
	cache->is_reloc = is_reloc;
}

struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level)
{
	struct btrfs_backref_node *node;

	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return node;

	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->upper);
	INIT_LIST_HEAD(&node->lower);
	RB_CLEAR_NODE(&node->rb_node);
	cache->nr_nodes++;
	node->level = level;
	node->bytenr = bytenr;

	return node;
}

struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

/*
 * Drop the backref node from cache, also cleaning up all its
 * upper edges and any uncached nodes in the path.
 *
 * This cleanup happens bottom up, thus the node should either
 * be the lowest node in the cache or a detached node.
 */
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Add the node to leaf node list if no other child block
		 * cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	btrfs_backref_drop_node(cache, node);
}

/*
 * Release all nodes/edges from current cache
 */
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct btrfs_backref_node, lower);
		btrfs_backref_cleanup_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->pending_edge));
	ASSERT(list_empty(&cache->useless_node));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}

/*
 * Handle direct tree backref
 *
 * Direct tree backref means, the backref item shows its parent bytenr
 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The converted backref key.
 *		For keyed backref, it's the item key.
 *		For inlined backref, objectid is the bytenr,
 *		type is btrfs_inline_ref_type, offset is
 *		btrfs_inline_ref_offset.
 */
static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
				      struct btrfs_key *ref_key,
				      struct btrfs_backref_node *cur)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *upper;
	struct rb_node *rb_node;

	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);

	/* Only reloc root uses backref pointing to itself */
	if (ref_key->objectid == ref_key->offset) {
		struct btrfs_root *root;

		cur->is_reloc_root = 1;
		/* Only reloc backref cache cares about a specific root */
		if (cache->is_reloc) {
			root = find_reloc_root(cache->fs_info, cur->bytenr);
			if (!root)
				return -ENOENT;
			cur->root = root;
		} else {
			/*
			 * For generic purpose backref cache, reloc root node
			 * is useless.
			 */
			list_add(&cur->list, &cache->useless_node);
		}
		return 0;
	}

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
	if (!rb_node) {
		/* Parent node not yet cached */
		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
						 cur->level + 1);
		if (!upper) {
			btrfs_backref_free_edge(cache, edge);
			return -ENOMEM;
		}

		/*
		 * Backrefs for the upper level block isn't cached, add the
		 * block to pending list
		 */
		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		/* Parent node already cached */
		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		ASSERT(upper->checked);
		INIT_LIST_HEAD(&edge->list[UPPER]);
	}
	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
	return 0;
}
/*
 * Handle indirect tree backref
 *
 * Indirect tree backref means, we only know which tree the node belongs to.
 * We still need to do a tree search to find out the parents. This is for
 * TREE_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
 * @tree_key:	The first key of this tree block.
 * @path:	A clean (released) path, to avoid allocating a path every time
 *		the function gets called.
 */
static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
					struct btrfs_path *path,
					struct btrfs_key *ref_key,
					struct btrfs_key *tree_key,
					struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_node *lower;
	struct btrfs_backref_edge *edge;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct rb_node *rb_node;
	int level;
	bool need_check = true;
	int ret;

	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
	if (IS_ERR(root))
		return PTR_ERR(root);
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		cur->cowonly = 1;

	if (btrfs_root_level(&root->root_item) == cur->level) {
		/* Tree root */
		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
		/*
		 * For reloc backref cache, we may ignore reloc root. But for
		 * general purpose backref cache, we can't rely on
		 * btrfs_should_ignore_reloc_root() as it may conflict with
		 * current running relocation and lead to missing root.
		 *
		 * For general purpose backref cache, reloc root detection is
		 * completely relying on direct backref (key->offset is parent
		 * bytenr), thus only do such check for reloc cache.
		 */
		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
			btrfs_put_root(root);
			list_add(&cur->list, &cache->useless_node);
		} else {
			cur->root = root;
		}
		return 0;
	}

	level = cur->level + 1;

	/* Search the tree to find parent blocks referring to the block */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
	path->lowest_level = 0;
	if (ret < 0) {
		btrfs_put_root(root);
		return ret;
	}
	if (ret > 0 && path->slots[level] > 0)
		path->slots[level]--;

	eb = path->nodes[level];
	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
		btrfs_err(fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
			  cur->bytenr, level - 1, root->root_key.objectid,
			  tree_key->objectid, tree_key->type, tree_key->offset);
		btrfs_put_root(root);
		ret = -ENOENT;
		goto out;
	}
	lower = cur;

	/* Add all nodes and edges in the path */
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level]) {
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       lower->bytenr);
			/* Same as previous should_ignore_reloc_root() call */
			if (btrfs_should_ignore_reloc_root(root) &&
			    cache->is_reloc) {
				btrfs_put_root(root);
				list_add(&lower->list, &cache->useless_node);
			} else {
				lower->root = root;
			}
			break;
		}

		edge = btrfs_backref_alloc_edge(cache);
		if (!edge) {
			btrfs_put_root(root);
			ret = -ENOMEM;
			goto out;
		}

		eb = path->nodes[level];
		rb_node = rb_simple_search(&cache->rb_root, eb->start);
		if (!rb_node) {
			upper = btrfs_backref_alloc_node(cache, eb->start,
							 lower->level + 1);
			if (!upper) {
				btrfs_put_root(root);
				btrfs_backref_free_edge(cache, edge);
				ret = -ENOMEM;
				goto out;
			}
			upper->owner = btrfs_header_owner(eb);
			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
				upper->cowonly = 1;

			/*
			 * If we know the block isn't shared we can avoid
			 * checking its backrefs.
			 */
			if (btrfs_block_can_be_shared(root, eb))
				upper->checked = 0;
			else
				upper->checked = 1;

			/*
			 * Add the block to pending list if we need to check its
			 * backrefs, we only do this once while walking up a
			 * tree as we will catch anything else later on.
			 */
			if (!upper->checked && need_check) {
				need_check = false;
				list_add_tail(&edge->list[UPPER],
					      &cache->pending_edge);
			} else {
				if (upper->checked)
					need_check = true;
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
		} else {
			upper = rb_entry(rb_node, struct btrfs_backref_node,
					 rb_node);
			ASSERT(upper->checked);
			INIT_LIST_HEAD(&edge->list[UPPER]);
			if (!upper->owner)
				upper->owner = btrfs_header_owner(eb);
		}
		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);

		if (rb_node) {
			btrfs_put_root(root);
			break;
		}
		lower = upper;
		upper = NULL;
	}
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * Add backref node @cur into @cache.
 *
 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
 *	 links aren't yet bi-directional. Needs to finish such links.
 *	 Use btrfs_backref_finish_upper_links() to finish such linkage.
 *
 * @path:	Released path for indirect tree backref lookup
 * @iter:	Released backref iter for extent tree search
 * @node_key:	The first key of the tree block
 */
int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *exist;
	int ret;

	ret = btrfs_backref_iter_start(iter, cur->bytenr);
	if (ret < 0)
		return ret;
	/*
	 * We skip the first btrfs_tree_block_info, as we don't use the key
	 * stored in it, but fetch it from the tree block
	 */
	if (btrfs_backref_has_tree_block_info(iter)) {
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		/* No extra backref? This means the tree block is corrupted */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * The backref was added previously when processing backref of
		 * type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * Add the upper level block to pending list if we need check
		 * its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		exist = NULL;
	}

	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		struct extent_buffer *eb;
		struct btrfs_key key;
		int type;

		cond_resched();
		eb = btrfs_backref_get_eb(iter);

		key.objectid = iter->bytenr;
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			struct btrfs_extent_inline_ref *iref;

			/* Update key for inline backref */
			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				ret = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
		} else {
			key.type = iter->cur_key.type;
			key.offset = iter->cur_key.offset;
		}

		/*
		 * Parent node found and matches current inline ref, no need to
		 * rebuild this node for this inline ref
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			continue;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ret = handle_direct_tree_backref(cache, &key, cur);
			if (ret < 0)
				goto out;
			continue;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			continue;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
		 * means the root objectid. We need to search the tree to get
		 * its parent bytenr.
		 */
		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
						   cur);
		if (ret < 0)
			goto out;
	}
	ret = 0;
	cur->checked = 1;
	WARN_ON(exist);
out:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
 */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start)
{
	struct list_head *useless_node = &cache->useless_node;
	struct btrfs_backref_edge *edge;
	struct rb_node *rb_node;
	LIST_HEAD(pending_edge);

	ASSERT(start->checked);

	/* Insert this node to cache if it's not COW-only */
	if (!start->cowonly) {
		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
					   &start->rb_node);
		if (rb_node)
			btrfs_backref_panic(cache->fs_info, start->bytenr,
					    -EEXIST);
		list_add_tail(&start->lower, &cache->leaves);
	}

	/*
	 * Use breadth first search to iterate all related edges.
	 *
	 * The starting points are all the edges of this node
	 */
	list_for_each_entry(edge, &start->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &pending_edge);

	while (!list_empty(&pending_edge)) {
		struct btrfs_backref_node *upper;
		struct btrfs_backref_node *lower;

		edge = list_first_entry(&pending_edge,
					struct btrfs_backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		lower = edge->node[LOWER];

		/* Parent is detached, no need to keep any edges */
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			btrfs_backref_free_edge(cache, edge);

			/* Lower node is orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
			continue;
		}

		/*
		 * All new nodes added in current build_backref_tree() haven't
		 * been linked to the cache rb tree.
		 * So if we have upper->rb_node populated, this means a cache
		 * hit. We only need to link the edge, as @upper and all its
		 * parents have already been linked.
		 */
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		/* Sanity check, we shouldn't have any unchecked nodes */
		if (!upper->checked) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Sanity check, COW-only node has non-COW-only parent */
		if (start->cowonly != upper->cowonly) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Only cache non-COW-only (subvolume trees) tree blocks */
		if (!upper->cowonly) {
			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
						   &upper->rb_node);
			if (rb_node) {
				btrfs_backref_panic(cache->fs_info,
						    upper->bytenr, -EEXIST);
				return -EUCLEAN;
			}
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		/*
		 * Also queue all the parent edges of this uncached node
		 * to finish the upper linkage
		 */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &pending_edge);
	}
	return 0;
}
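
/*
 * Example usage (illustrative sketch): relocation's build_backref_tree() is
 * the main user of the helpers above and ties them together roughly like
 * this. Locking, the walk that picks the next pending node from
 * cache->pending_edge, and detail handling are omitted, and node_key/level
 * are assumed from the caller's context:
 *
 *	node = btrfs_backref_alloc_node(cache, bytenr, level);
 *	if (!node)
 *		return ERR_PTR(-ENOMEM);
 *	node->lowest = 1;
 *	cur = node;
 *
 *	// Breadth-first: resolve backrefs of cur, then of every node queued
 *	// on cache->pending_edge, until nothing is pending.
 *	do {
 *		ret = btrfs_backref_add_tree_node(cache, path, iter,
 *						  node_key, cur);
 *		if (ret < 0)
 *			goto error;
 *		// pick the next cur from cache->pending_edge, if any
 *	} while (cur);
 *
 *	ret = btrfs_backref_finish_upper_links(cache, node);
 *	if (ret < 0)
 *		goto error;
 *
 * error:
 *	btrfs_backref_error_cleanup(cache, node);
 */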
void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
	}
	while (!list_empty(&cache->pending_edge)) {
		edge = list_first_entry(&cache->pending_edge,
					struct btrfs_backref_edge, list[UPPER]);
		list_del(&edge->list[UPPER]);
		list_del(&edge->list[LOWER]);
		lower = edge->node[LOWER];
		upper = edge->node[UPPER];
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Lower is no longer linked to any upper backref nodes and
		 * isn't in the cache, we can free it ourselves.
		 */
		if (list_empty(&lower->upper) &&
		    RB_EMPTY_NODE(&lower->rb_node))
			list_add(&lower->list, &cache->useless_node);

		if (!RB_EMPTY_NODE(&upper->rb_node))
			continue;

		/* Add this guy's upper edges to the list to process */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER],
				      &cache->pending_edge);
		if (list_empty(&upper->upper))
			list_add(&upper->list, &cache->useless_node);
	}

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
		if (lower == node)
			node = NULL;
		btrfs_backref_drop_node(cache, lower);
	}

	btrfs_backref_cleanup_node(cache, node);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
}