/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/*
 * Now we have all buffers that must be used in balancing of the tree.
 * Further calculations cannot cause schedule(), so the buffer tree will
 * be stable until the balancing is finished.
 *
 * Balance the tree according to the analysis made before, using the
 * buffers obtained by all of the above.
 */

/**
 ** balance_leaf_when_delete
 ** balance_leaf
 ** do_balance
 **
 **/
#include <asm/uaccess.h>
#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>
#ifdef CONFIG_REISERFS_CHECK

/*
 * Detects whether more than one copy of tb exists, as a means of
 * checking whether schedule is interrupting do_balance.
 */
struct tree_balance *cur_tb = NULL;
#endif
inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
				       struct buffer_head *bh, int flag)
{
	journal_mark_dirty(tb->transaction_handle,
			   tb->transaction_handle->t_super, bh);
}

#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
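
/*
 * Internal-node and super-block buffers are dirtied through the same
 * journal call as leaf buffers; the three names above exist only to
 * document the caller's intent.
 */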
/*
 * Summary:
 *  if deleting something ( tb->insert_size[0] < 0 )
 *	return balance_leaf_when_delete();  (flag d handled here)
 *  else
 *	if lnum is larger than 0 we put items into the left node
 *	if rnum is larger than 0 we put items into the right node
 *	if snum1 is larger than 0 we put items into the new node s1
 *	if snum2 is larger than 0 we put items into the new node s2
 *
 * Note that all *num* count new items being created.
 *
 * It would be easier to read balance_leaf() if each of these summary
 * lines was a separate procedure rather than being inlined.  I think
 * that there are many passages here and in balance_leaf_when_delete()
 * in which two calls to one procedure can replace two passages, and it
 * might save cache space and reduce software maintenance costs to do so.
 *
 * Vladimir made the perceptive comment that we should offload most of
 * the decision making in this function into fix_nodes/check_balance,
 * and then create some sort of structure in tb that says what actions
 * should be performed by do_balance.
 *
 * -Hans
 */
/*
 * Balance leaf node in case of delete or cut: insert_size[0] < 0
 *
 * lnum, rnum can have values >= -1
 *	-1 means that the neighbor must be joined with S
 *	 0 means that nothing should be done with the neighbor
 *	>0 means to shift entirely or partly the specified number of
 *	   items to the neighbor
 */
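
/*
 * Example: lnum[0] == -1 with rnum[0] == 0 means all of S[0] is merged
 * into L[0] and S[0] is then freed; a positive lnum[0] with lbytes ==
 * -1 shifts that many whole items without splitting any of them.
 */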
static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);
	int pos_in_item = tb->tb_path->pos_in_item;
	struct buffer_info bi;
	int n;
	struct item_head *ih;

	RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
	       "vs-12000: level: wrong FR %z", tb->FR[0]);
	RFALSE(tb->blknum[0] > 1,
	       "PAP-12005: tb->blknum == %d, can not be > 1", tb->blknum[0]);
	RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
	       "PAP-12010: tree can not be empty");

	ih = B_N_PITEM_HEAD(tbS0, item_pos);

	/* Delete or truncate the item */
	switch (flag) {
	case M_DELETE:		/* delete item in S[0] */
		RFALSE(ih_item_len(ih) + IH_SIZE != -tb->insert_size[0],
		       "vs-12013: mode Delete, insert size %d, ih to be deleted %h",
		       -tb->insert_size[0], ih);

		bi.tb = tb;
		bi.bi_bh = tbS0;
		bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
		bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
		leaf_delete_items(&bi, 0, item_pos, 1, -1);

		if (!item_pos && tb->CFL[0]) {
			if (B_NR_ITEMS(tbS0)) {
				replace_key(tb, tb->CFL[0], tb->lkey[0],
					    tbS0, 0);
			} else {
				if (!PATH_H_POSITION(tb->tb_path, 1))
					replace_key(tb, tb->CFL[0], tb->lkey[0],
						    PATH_H_PPARENT(tb->tb_path,
								   0), 0);
			}
		}

		RFALSE(!item_pos && !tb->CFL[0],
		       "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0],
		       tb->L[0]);
		break;

	case M_CUT:{		/* cut item in S[0] */
			bi.tb = tb;
			bi.bi_bh = tbS0;
			bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
			bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
			if (is_direntry_le_ih(ih)) {
				/*
				 * UFS unlink semantics are such that you can
				 * only delete one directory entry at a time.
				 *
				 * When we cut a directory,
				 * tb->insert_size[0] means the number of
				 * entries to be cut (always 1).
				 */
				tb->insert_size[0] = -1;
				leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
						     -tb->insert_size[0]);

				RFALSE(!item_pos && !pos_in_item && !tb->CFL[0],
				       "PAP-12030: can not change delimiting key. CFL[0]=%p",
				       tb->CFL[0]);

				if (!item_pos && !pos_in_item && tb->CFL[0]) {
					replace_key(tb, tb->CFL[0], tb->lkey[0],
						    tbS0, 0);
				}
			} else {
				leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
						     -tb->insert_size[0]);

				RFALSE(!ih_item_len(ih),
				       "PAP-12035: cut must leave non-zero dynamic length of item");
			}
			break;
		}

	default:
		print_cur_tb("12040");
		reiserfs_panic(tb->tb_sb,
			       "PAP-12040: balance_leaf_when_delete: unexpected mode: %s(%d)",
			       (flag == M_PASTE) ? "PASTE" :
			       ((flag == M_INSERT) ? "INSERT" : "UNKNOWN"),
			       flag);
	}

	/*
	 * The rule is that no shifting occurs unless, by shifting, a node
	 * can be freed.
	 */
	n = B_NR_ITEMS(tbS0);
	if (tb->lnum[0]) {	/* L[0] takes part in balancing */
		if (tb->lnum[0] == -1) {	/* L[0] must be joined with S[0] */
			if (tb->rnum[0] == -1) {	/* R[0] must be also joined with S[0] */
				if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
					/* all contents of all the 3 buffers will be in L[0] */
					if (PATH_H_POSITION(tb->tb_path, 1) == 0
					    && 1 < B_NR_ITEMS(tb->FR[0]))
						replace_key(tb, tb->CFL[0],
							    tb->lkey[0],
							    tb->FR[0], 1);

					leaf_move_items(LEAF_FROM_S_TO_L, tb, n,
							-1, NULL);
					leaf_move_items(LEAF_FROM_R_TO_L, tb,
							B_NR_ITEMS(tb->R[0]),
							-1, NULL);

					reiserfs_invalidate_buffer(tb, tbS0);
					reiserfs_invalidate_buffer(tb, tb->R[0]);

					return 0;
				}
				/* all contents of all the 3 buffers will be in R[0] */
				leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1,
						NULL);
				leaf_move_items(LEAF_FROM_L_TO_R, tb,
						B_NR_ITEMS(tb->L[0]), -1, NULL);

				/* right_delimiting_key is correct in R[0] */
				replace_key(tb, tb->CFR[0], tb->rkey[0],
					    tb->R[0], 0);

				reiserfs_invalidate_buffer(tb, tbS0);
				reiserfs_invalidate_buffer(tb, tb->L[0]);

				return -1;
			}

			RFALSE(tb->rnum[0] != 0,
			       "PAP-12045: rnum must be 0 (%d)", tb->rnum[0]);
			/* all contents of L[0] and S[0] will be in L[0] */
			leaf_shift_left(tb, n, -1);

			reiserfs_invalidate_buffer(tb, tbS0);

			return 0;
		}
		/*
		 * a part of contents of S[0] will be in L[0] and the rest
		 * part of S[0] will be in R[0]
		 */
		RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
		       (tb->lnum[0] + tb->rnum[0] > n + 1),
		       "PAP-12050: rnum(%d) and lnum(%d) and item number(%d) in S[0] are not consistent",
		       tb->rnum[0], tb->lnum[0], n);
		RFALSE((tb->lnum[0] + tb->rnum[0] == n) &&
		       (tb->lbytes != -1 || tb->rbytes != -1),
		       "PAP-12055: bad rbytes (%d)/lbytes (%d) parameters when items are not split",
		       tb->rbytes, tb->lbytes);
		RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) &&
		       (tb->lbytes < 1 || tb->rbytes != -1),
		       "PAP-12060: bad rbytes (%d)/lbytes (%d) parameters when items are split",
		       tb->rbytes, tb->lbytes);

		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);

		reiserfs_invalidate_buffer(tb, tbS0);

		return 0;
	}

	if (tb->rnum[0] == -1) {
		/* all contents of R[0] and S[0] will be in R[0] */
		leaf_shift_right(tb, n, -1);
		reiserfs_invalidate_buffer(tb, tbS0);
		return 0;
	}

	RFALSE(tb->rnum[0],
	       "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]);
	return 0;
}
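
/*
 * Note: the only non-zero return value above is the -1 of the
 * "everything into R[0]" path; balance_leaf() propagates the value
 * unchanged to its caller.
 */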
static int balance_leaf(struct tree_balance *tb, struct item_head *ih,	/* item header of inserted item (in little-endian form) */
			const char *body,	/* body of inserted item or bytes to paste */
			int flag,	/* i - insert, d - delete, c - cut, p - paste
					   (see comment to do_balance) */
			struct item_head *insert_key,	/* in our processing of one level we sometimes determine what
							   must be inserted into the next higher level.  This insertion
							   consists of a key or two keys and their corresponding
							   pointers */
			struct buffer_head **insert_ptr	/* inserted node-ptrs for the next level */
    )
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);	/* index into the array of item headers in S[0]
							   of the affected item */
	struct buffer_info bi;
	struct buffer_head *S_new[2];	/* new nodes allocated to hold what could not fit into S */
	int snum[2];		/* number of items that will be placed
				   into S_new (includes partially shifted
				   items) */
	int sbytes[2];		/* if an item is partially shifted into S_new then
				   if it is a directory item
				   it is the number of entries from the item that are shifted into S_new
				   else
				   it is the number of bytes from the item that are shifted into S_new
				 */
	int n, i;
	int ret_val;
	int pos_in_item;
	int zeros_num;

	PROC_INFO_INC(tb->tb_sb, balance_at[0]);
	/* Make balance in case insert_size[0] < 0 */
	if (tb->insert_size[0] < 0)
		return balance_leaf_when_delete(tb, flag);

	zeros_num = 0;
	if (flag == M_INSERT && body == 0)
		zeros_num = ih_item_len(ih);

	pos_in_item = tb->tb_path->pos_in_item;
	/*
	 * for indirect item pos_in_item is measured in unformatted node
	 * pointers.  Recalculate to bytes.
	 */
	if (flag != M_INSERT
	    && is_indirect_le_ih(B_N_PITEM_HEAD(tbS0, item_pos)))
		pos_in_item *= UNFM_P_SIZE;
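	/*
	 * (UNFM_P_SIZE is the size of one on-disk unformatted-node
	 * pointer, a 32-bit block number, so e.g. pos_in_item 2 becomes
	 * byte offset 8 after this rescaling.)
	 */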
	if (tb->lnum[0] > 0) {
		/* Shift lnum[0] items from S[0] to the left neighbor L[0] */
		if (item_pos < tb->lnum[0]) {
			/* new item or its part falls into L[0], shift it too */
			n = B_NR_ITEMS(tb->L[0]);

			switch (flag) {
			case M_INSERT:	/* insert item into L[0] */
				if (item_pos == tb->lnum[0] - 1
				    && tb->lbytes != -1) {
					/* part of new item falls into L[0] */
					int new_item_len;
					int version;

					ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, -1);

					/* Calculate item length to insert to S[0] */
					new_item_len = ih_item_len(ih) - tb->lbytes;
					/* Calculate and check item length to insert to L[0] */
					put_ih_item_len(ih, ih_item_len(ih) - new_item_len);

					RFALSE(ih_item_len(ih) <= 0,
					       "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d",
					       ih_item_len(ih));

					/* Insert new item into L[0] */
					bi.tb = tb;
					bi.bi_bh = tb->L[0];
					bi.bi_parent = tb->FL[0];
					bi.bi_position = get_left_neighbor_position(tb, 0);
					leaf_insert_into_buf(&bi,
							     n + item_pos - ret_val, ih, body,
							     zeros_num > ih_item_len(ih) ?
							     ih_item_len(ih) : zeros_num);

					version = ih_version(ih);

					/* Calculate key component, item length and body to insert into S[0] */
					set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
							   (tb->lbytes <<
							    (is_indirect_le_ih(ih) ?
							     tb->tb_sb->s_blocksize_bits -
							     UNFM_P_SHIFT : 0)));

					put_ih_item_len(ih, new_item_len);
					if (tb->lbytes > zeros_num) {
						body += (tb->lbytes - zeros_num);
						zeros_num = 0;
					} else
						zeros_num -= tb->lbytes;

					RFALSE(ih_item_len(ih) <= 0,
					       "PAP-12085: there is nothing to insert into S[0]: ih_item_len=%d",
					       ih_item_len(ih));
				} else {
					/* new item in whole falls into L[0] */

					/* Shift lnum[0]-1 items to L[0] */
					ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes);
					/* Insert new item into L[0] */
					bi.tb = tb;
					bi.bi_bh = tb->L[0];
					bi.bi_parent = tb->FL[0];
					bi.bi_position = get_left_neighbor_position(tb, 0);
					leaf_insert_into_buf(&bi,
							     n + item_pos - ret_val,
							     ih, body, zeros_num);
					tb->insert_size[0] = 0;
					zeros_num = 0;
				}
				break;

			case M_PASTE:	/* append item in L[0] */
				if (item_pos == tb->lnum[0] - 1
				    && tb->lbytes != -1) {
					/* we must shift the part of the appended item */
					if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) {
						RFALSE(zeros_num,
						       "PAP-12090: invalid parameter in case of a directory");
						/* directory item */
						if (tb->lbytes > pos_in_item) {
							/* new directory entry falls into L[0] */
							struct item_head *pasted;
							int l_pos_in_item = pos_in_item;

							/*
							 * Shift lnum[0] - 1 items in whole.
							 * Shift lbytes - 1 entries from given
							 * directory item.
							 */
							ret_val = leaf_shift_left(tb, tb->lnum[0],
										  tb->lbytes - 1);
							if (ret_val && !item_pos) {
								pasted = B_N_PITEM_HEAD(tb->L[0],
											B_NR_ITEMS(tb->L[0]) - 1);
								l_pos_in_item += I_ENTRY_COUNT(pasted) -
								    (tb->lbytes - 1);
							}

							/* Append given directory entry to directory item */
							bi.tb = tb;
							bi.bi_bh = tb->L[0];
							bi.bi_parent = tb->FL[0];
							bi.bi_position = get_left_neighbor_position(tb, 0);
							leaf_paste_in_buffer(&bi,
									     n + item_pos - ret_val,
									     l_pos_in_item,
									     tb->insert_size[0],
									     body, zeros_num);

							/*
							 * The previous call prepared space for the
							 * new entry; the following call pastes the
							 * entry itself.  When we have a merged
							 * directory item, pos_in_item has been
							 * changed too.
							 */

							/* paste new directory entry.  1 is entry number */
							leaf_paste_entries(bi.bi_bh,
									   n + item_pos - ret_val,
									   l_pos_in_item, 1,
									   (struct reiserfs_de_head *)body,
									   body + DEH_SIZE,
									   tb->insert_size[0]);
							tb->insert_size[0] = 0;
						} else {
							/* new directory item doesn't fall into L[0] */

							/*
							 * Shift lnum[0]-1 items in whole.  Shift
							 * lbytes directory entries from directory
							 * item number lnum[0].
							 */
							leaf_shift_left(tb, tb->lnum[0],
									tb->lbytes);
						}
						/* Calculate new position to append in item body */
						pos_in_item -= tb->lbytes;
					} else {
						/* regular object */
						RFALSE(tb->lbytes <= 0,
						       "PAP-12095: there is nothing to shift to L[0]. lbytes=%d",
						       tb->lbytes);
						RFALSE(pos_in_item !=
						       ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),
						       "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d",
						       ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),
						       pos_in_item);

						if (tb->lbytes >= pos_in_item) {
							/* appended item will be in L[0] in whole */
							int l_n;

							/* this bytes number must be appended to the last item of L[h] */
							l_n = tb->lbytes - pos_in_item;

							/* Calculate new insert_size[0] */
							tb->insert_size[0] -= l_n;

							RFALSE(tb->insert_size[0] <= 0,
							       "PAP-12105: there is nothing to paste into L[0]. insert_size=%d",
							       tb->insert_size[0]);

							ret_val = leaf_shift_left(tb, tb->lnum[0],
										  ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)));

							/* Append to body of item in L[0] */
							bi.tb = tb;
							bi.bi_bh = tb->L[0];
							bi.bi_parent = tb->FL[0];
							bi.bi_position = get_left_neighbor_position(tb, 0);
							leaf_paste_in_buffer(&bi,
									     n + item_pos - ret_val,
									     ih_item_len(B_N_PITEM_HEAD(tb->L[0],
													n + item_pos - ret_val)),
									     l_n, body,
									     zeros_num > l_n ? l_n : zeros_num);

							/* 0-th item in S0 can be only of DIRECT type when l_n != 0 */
							{
								int version;
								int temp_l = l_n;

								RFALSE(ih_item_len(B_N_PITEM_HEAD(tbS0, 0)),
								       "PAP-12106: item length must be 0");
								RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0),
											  B_N_PKEY(tb->L[0],
												   n + item_pos - ret_val)),
								       "PAP-12107: items must be of the same file");

								if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0],
												     n + item_pos - ret_val))) {
									temp_l = l_n <<
									    (tb->tb_sb->s_blocksize_bits -
									     UNFM_P_SHIFT);
								}
								/* update key of first item in S0 */
								version = ih_version(B_N_PITEM_HEAD(tbS0, 0));
								set_le_key_k_offset(version, B_N_PKEY(tbS0, 0),
										    le_key_k_offset(version,
												    B_N_PKEY(tbS0, 0)) + temp_l);
								/* update left delimiting key */
								set_le_key_k_offset(version,
										    B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]),
										    le_key_k_offset(version,
												    B_N_PDELIM_KEY(tb->CFL[0],
														   tb->lkey[0])) + temp_l);
							}

							/* Calculate new body, position in item and insert_size[0] */
							if (l_n > zeros_num) {
								body += (l_n - zeros_num);
								zeros_num = 0;
							} else
								zeros_num -= l_n;
							pos_in_item = 0;

							RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0),
										  B_N_PKEY(tb->L[0],
											   B_NR_ITEMS(tb->L[0]) - 1))
							       || !op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)
							       || !op_is_left_mergeable(B_N_PDELIM_KEY(tb->CFL[0],
												       tb->lkey[0]),
											tbS0->b_size),
							       "PAP-12120: item must be merge-able with left neighboring item");
						} else {
							/* only part of the appended item will be in L[0] */

							/* Calculate position in item for append in S[0] */
							pos_in_item -= tb->lbytes;

							RFALSE(pos_in_item <= 0,
							       "PAP-12125: no place for paste. pos_in_item=%d",
							       pos_in_item);

							/*
							 * Shift lnum[0] - 1 items in whole.
							 * Shift lbytes - 1 byte from item number
							 * lnum[0].
							 */
							leaf_shift_left(tb, tb->lnum[0],
									tb->lbytes);
						}
					}
				} else {
					/* appended item will be in L[0] in whole */
					struct item_head *pasted;

					if (!item_pos &&
					    op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) {
						/*
						 * if we paste into the first item of S[0] and
						 * it is left mergable, then increment
						 * pos_in_item by the size of the last item in
						 * L[0]
						 */
						pasted = B_N_PITEM_HEAD(tb->L[0], n - 1);
						if (is_direntry_le_ih(pasted))
							pos_in_item += ih_entry_count(pasted);
						else
							pos_in_item += ih_item_len(pasted);
					}

					/*
					 * Shift lnum[0] - 1 items in whole.  Shift lbytes - 1
					 * byte from item number lnum[0].
					 */
					ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
					/* Append to body of item in L[0] */
					bi.tb = tb;
					bi.bi_bh = tb->L[0];
					bi.bi_parent = tb->FL[0];
					bi.bi_position = get_left_neighbor_position(tb, 0);
					leaf_paste_in_buffer(&bi, n + item_pos - ret_val,
							     pos_in_item,
							     tb->insert_size[0],
							     body, zeros_num);

					/* if appended item is directory, paste entry */
					pasted = B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val);
					if (is_direntry_le_ih(pasted))
						leaf_paste_entries(bi.bi_bh,
								   n + item_pos - ret_val,
								   pos_in_item, 1,
								   (struct reiserfs_de_head *)body,
								   body + DEH_SIZE,
								   tb->insert_size[0]);
					/*
					 * if appended item is indirect item, put unformatted
					 * node into un list
					 */
					if (is_indirect_le_ih(pasted))
						set_ih_free_space(pasted, 0);
					tb->insert_size[0] = 0;
					zeros_num = 0;
				}
				break;

			default:	/* cases d and t */
				reiserfs_panic(tb->tb_sb,
					       "PAP-12130: balance_leaf: lnum > 0: unexpected mode: %s(%d)",
					       (flag == M_DELETE) ? "DELETE" :
					       ((flag == M_CUT) ? "CUT" : "UNKNOWN"),
					       flag);
			}
		} else {
			/* new item doesn't fall into L[0] */
			leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
		}
	}

	/* tb->lnum[0] > 0 */
	/* Calculate new item position */
	item_pos -= (tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0));
	if (tb->rnum[0] > 0) {
		/* shift rnum[0] items from S[0] to the right neighbor R[0] */
		n = B_NR_ITEMS(tbS0);

		switch (flag) {
		case M_INSERT:	/* insert item */
			if (n - tb->rnum[0] < item_pos) {
				/* new item or its part falls into R[0] */
				if (item_pos == n - tb->rnum[0] + 1
				    && tb->rbytes != -1) {
					/* part of new item falls into R[0] */
					loff_t old_key_comp, old_len, r_zeros_number;
					const char *r_body;
					int version;
					loff_t offset;

					leaf_shift_right(tb, tb->rnum[0] - 1, -1);

					version = ih_version(ih);
					/* Remember key component and item length */
					old_key_comp = le_ih_k_offset(ih);
					old_len = ih_item_len(ih);

					/* Calculate key component and item length to insert into R[0] */
					offset = le_ih_k_offset(ih) +
					    ((old_len - tb->rbytes) <<
					     (is_indirect_le_ih(ih) ?
					      tb->tb_sb->s_blocksize_bits -
					      UNFM_P_SHIFT : 0));
					set_le_ih_k_offset(ih, offset);
					put_ih_item_len(ih, tb->rbytes);

					/* Insert part of the item into R[0] */
					bi.tb = tb;
					bi.bi_bh = tb->R[0];
					bi.bi_parent = tb->FR[0];
					bi.bi_position = get_right_neighbor_position(tb, 0);
					if ((old_len - tb->rbytes) > zeros_num) {
						r_zeros_number = 0;
						r_body = body + (old_len - tb->rbytes) - zeros_num;
					} else {
						r_body = body;
						r_zeros_number = zeros_num - (old_len - tb->rbytes);
						zeros_num -= r_zeros_number;
					}

					leaf_insert_into_buf(&bi, 0, ih, r_body,
							     r_zeros_number);

					/* Replace right delimiting key by first key in R[0] */
					replace_key(tb, tb->CFR[0], tb->rkey[0],
						    tb->R[0], 0);

					/* Calculate key component and item length to insert into S[0] */
					set_le_ih_k_offset(ih, old_key_comp);
					put_ih_item_len(ih, old_len - tb->rbytes);

					tb->insert_size[0] -= tb->rbytes;
				} else {
					/* whole new item falls into R[0] */

					/* Shift rnum[0]-1 items to R[0] */
					ret_val = leaf_shift_right(tb,
								   tb->rnum[0] - 1,
								   tb->rbytes);
					/* Insert new item into R[0] */
					bi.tb = tb;
					bi.bi_bh = tb->R[0];
					bi.bi_parent = tb->FR[0];
					bi.bi_position = get_right_neighbor_position(tb, 0);
					leaf_insert_into_buf(&bi,
							     item_pos - n + tb->rnum[0] - 1,
							     ih, body, zeros_num);

					if (item_pos - n + tb->rnum[0] - 1 == 0) {
						replace_key(tb, tb->CFR[0],
							    tb->rkey[0],
							    tb->R[0], 0);
					}
					zeros_num = tb->insert_size[0] = 0;
				}
			} else {
				/* new item or part of it doesn't fall into R[0] */
				leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
			}
			break;

		case M_PASTE:	/* append item */
			if (n - tb->rnum[0] <= item_pos) {
				/* pasted item or part of it falls into R[0] */
				if (item_pos == n - tb->rnum[0]
				    && tb->rbytes != -1) {
					/* we must shift the part of the appended item */
					if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) {
						/* we append to directory item */
						int entry_count;

						RFALSE(zeros_num,
						       "PAP-12145: invalid parameter in case of a directory");
						entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD(tbS0, item_pos));
						if (entry_count - tb->rbytes < pos_in_item) {
							/* new directory entry falls into R[0] */
							int paste_entry_position;

							RFALSE(tb->rbytes - 1 >= entry_count ||
							       !tb->insert_size[0],
							       "PAP-12150: not enough entries to shift to R[0]: rbytes=%d, entry_count=%d",
							       tb->rbytes, entry_count);

							/*
							 * Shift rnum[0]-1 items in whole.
							 * Shift rbytes-1 directory entries
							 * from directory item number rnum[0].
							 */
							leaf_shift_right(tb, tb->rnum[0],
									 tb->rbytes - 1);

							/* Paste given directory entry to directory item */
							paste_entry_position = pos_in_item -
							    entry_count + tb->rbytes - 1;
							bi.tb = tb;
							bi.bi_bh = tb->R[0];
							bi.bi_parent = tb->FR[0];
							bi.bi_position = get_right_neighbor_position(tb, 0);
							leaf_paste_in_buffer(&bi, 0,
									     paste_entry_position,
									     tb->insert_size[0],
									     body, zeros_num);
							/* paste entry */
							leaf_paste_entries(bi.bi_bh, 0,
									   paste_entry_position, 1,
									   (struct reiserfs_de_head *)body,
									   body + DEH_SIZE,
									   tb->insert_size[0]);

							if (paste_entry_position == 0) {
								/* change delimiting keys */
								replace_key(tb, tb->CFR[0],
									    tb->rkey[0],
									    tb->R[0], 0);
							}

							tb->insert_size[0] = 0;
							pos_in_item++;
						} else {
							/* new directory entry doesn't fall into R[0] */
							leaf_shift_right(tb, tb->rnum[0],
									 tb->rbytes);
						}
					} else {
						/* regular object */
						int n_shift, n_rem, r_zeros_number;
						const char *r_body;

						/* Calculate number of bytes which must be shifted from appended item */
						if ((n_shift = tb->rbytes - tb->insert_size[0]) < 0)
							n_shift = 0;

						RFALSE(pos_in_item !=
						       ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),
						       "PAP-12155: invalid position to paste. pos_in_item=%d, ih_item_len=%d",
						       pos_in_item,
						       ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)));

						leaf_shift_right(tb, tb->rnum[0], n_shift);

						/* Calculate number of bytes which must remain in body after appending to R[0] */
						if ((n_rem = tb->insert_size[0] - tb->rbytes) < 0)
							n_rem = 0;

						{
							int version;
							unsigned long temp_rem = n_rem;

							version = ih_version(B_N_PITEM_HEAD(tb->R[0], 0));
							if (is_indirect_le_key(version,
									       B_N_PKEY(tb->R[0], 0))) {
								temp_rem = n_rem <<
								    (tb->tb_sb->s_blocksize_bits -
								     UNFM_P_SHIFT);
							}
							set_le_key_k_offset(version,
									    B_N_PKEY(tb->R[0], 0),
									    le_key_k_offset(version,
											    B_N_PKEY(tb->R[0], 0)) +
									    temp_rem);
							set_le_key_k_offset(version,
									    B_N_PDELIM_KEY(tb->CFR[0],
											   tb->rkey[0]),
									    le_key_k_offset(version,
											    B_N_PDELIM_KEY(tb->CFR[0],
													   tb->rkey[0])) +
									    temp_rem);
						}
						/*
						 * k_offset (B_N_PKEY(tb->R[0],0)) += n_rem;
						 * k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;
						 */
						do_balance_mark_internal_dirty(tb, tb->CFR[0], 0);

						/* Append part of body into R[0] */
						bi.tb = tb;
						bi.bi_bh = tb->R[0];
						bi.bi_parent = tb->FR[0];
						bi.bi_position = get_right_neighbor_position(tb, 0);
						if (n_rem > zeros_num) {
							r_zeros_number = 0;
							r_body = body + n_rem - zeros_num;
						} else {
							r_body = body;
							r_zeros_number = zeros_num - n_rem;
							zeros_num -= r_zeros_number;
						}

						leaf_paste_in_buffer(&bi, 0, n_shift,
								     tb->insert_size[0] - n_rem,
								     r_body, r_zeros_number);

						if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->R[0], 0))) {
#if 0
							RFALSE(n_rem,
							       "PAP-12160: paste more than one unformatted node pointer");
#endif
							set_ih_free_space(B_N_PITEM_HEAD(tb->R[0], 0), 0);
						}
						tb->insert_size[0] = n_rem;
						if (!n_rem)
							pos_in_item++;
					}
				} else {
					/* pasted item in whole falls into R[0] */
					struct item_head *pasted;

					ret_val = leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
					/* append item in R[0] */
					if (pos_in_item >= 0) {
						bi.tb = tb;
						bi.bi_bh = tb->R[0];
						bi.bi_parent = tb->FR[0];
						bi.bi_position = get_right_neighbor_position(tb, 0);
						leaf_paste_in_buffer(&bi,
								     item_pos - n + tb->rnum[0],
								     pos_in_item,
								     tb->insert_size[0],
								     body, zeros_num);
					}

					/* paste new entry, if item is directory item */
					pasted = B_N_PITEM_HEAD(tb->R[0],
								item_pos - n + tb->rnum[0]);
					if (is_direntry_le_ih(pasted)
					    && pos_in_item >= 0) {
						leaf_paste_entries(bi.bi_bh,
								   item_pos - n + tb->rnum[0],
								   pos_in_item, 1,
								   (struct reiserfs_de_head *)body,
								   body + DEH_SIZE,
								   tb->insert_size[0]);
						if (!pos_in_item) {
							RFALSE(item_pos - n + tb->rnum[0],
							       "PAP-12165: directory item must be first item of node when pasting is in 0th position");

							/* update delimiting keys */
							replace_key(tb, tb->CFR[0],
								    tb->rkey[0],
								    tb->R[0], 0);
						}
					}

					if (is_indirect_le_ih(pasted))
						set_ih_free_space(pasted, 0);
					zeros_num = tb->insert_size[0] = 0;
				}
			} else {
				/* new item doesn't fall into R[0] */
				leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
			}
			break;

		default:	/* cases d and t */
			reiserfs_panic(tb->tb_sb,
				       "PAP-12175: balance_leaf: rnum > 0: unexpected mode: %s(%d)",
				       (flag == M_DELETE) ? "DELETE" :
				       ((flag == M_CUT) ? "CUT" : "UNKNOWN"),
				       flag);
		}
	}
	/* tb->rnum[0] > 0 */
	RFALSE(tb->blknum[0] > 3,
	       "PAP-12180: blknum can not be %d. It must be <= 3",
	       tb->blknum[0]);
	RFALSE(tb->blknum[0] < 0,
	       "PAP-12185: blknum can not be %d. It must be >= 0",
	       tb->blknum[0]);

	/*
	 * if while adding to a node we discover that it is possible to split
	 * it in two, and merge the left part into the left neighbor and the
	 * right part into the right neighbor, eliminating the node
	 */
	if (tb->blknum[0] == 0) {	/* node S[0] is empty now */
		RFALSE(!tb->lnum[0] || !tb->rnum[0],
		       "PAP-12190: lnum and rnum must not be zero");
		/*
		 * if insertion was done before 0-th position in R[0], right
		 * delimiting key of the tb->L[0]'s and left delimiting key are
		 * not set correctly
		 */
		if (tb->CFL[0]) {
			if (!tb->CFR[0])
				reiserfs_panic(tb->tb_sb,
					       "vs-12195: balance_leaf: CFR not initialized");
			copy_key(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]),
				 B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]));
			do_balance_mark_internal_dirty(tb, tb->CFL[0], 0);
		}

		reiserfs_invalidate_buffer(tb, tbS0);
		return 0;
	}

	/* Fill new nodes that appear in place of S[0] */

	/*
	 * I am told that this copying is because we need an array to enable
	 * the looping code. -Hans
	 */
	snum[0] = tb->s1num, snum[1] = tb->s2num;
	sbytes[0] = tb->s1bytes;
	sbytes[1] = tb->s2bytes;
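	/*
	 * blknum[0] counts the nodes that will hold the former contents
	 * of S[0], S[0] itself included, so the loop below creates at
	 * most two new nodes, each filled with items taken from the end
	 * of S[0].
	 */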
	for (i = tb->blknum[0] - 2; i >= 0; i--) {
		RFALSE(!snum[i], "PAP-12200: snum[%d] == %d. Must be > 0", i,
		       snum[i]);

		/* here we shift from S to S_new nodes */
		S_new[i] = get_FEB(tb);

		/* initialize block type and tree level */
		set_blkh_level(B_BLK_HEAD(S_new[i]), DISK_LEAF_NODE_LEVEL);

		n = B_NR_ITEMS(tbS0);

		switch (flag) {
		case M_INSERT:	/* insert item */
			if (n - snum[i] < item_pos) {
				/* new item or its part falls into first new node S_new[i] */
				if (item_pos == n - snum[i] + 1
				    && sbytes[i] != -1) {
					/* part of new item falls into S_new[i] */
					int old_key_comp, old_len, r_zeros_number;
					const char *r_body;
					int version;

					/* Move snum[i]-1 items from S[0] to S_new[i] */
					leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
							snum[i] - 1, -1,
							S_new[i]);

					/* Remember key component and item length */
					version = ih_version(ih);
					old_key_comp = le_ih_k_offset(ih);
					old_len = ih_item_len(ih);

					/* Calculate key component and item length to insert into S_new[i] */
					set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
							   ((old_len - sbytes[i]) <<
							    (is_indirect_le_ih(ih) ?
							     tb->tb_sb->s_blocksize_bits -
							     UNFM_P_SHIFT : 0)));

					put_ih_item_len(ih, sbytes[i]);

					/* Insert part of the item into S_new[i] before 0-th item */
					bi.tb = tb;
					bi.bi_bh = S_new[i];
					bi.bi_parent = NULL;
					bi.bi_position = 0;

					if ((old_len - sbytes[i]) > zeros_num) {
						r_zeros_number = 0;
						r_body = body + (old_len - sbytes[i]) - zeros_num;
					} else {
						r_body = body;
						r_zeros_number = zeros_num - (old_len - sbytes[i]);
						zeros_num -= r_zeros_number;
					}

					leaf_insert_into_buf(&bi, 0, ih, r_body,
							     r_zeros_number);

					/* Calculate key component and item length to insert into S[i] */
					set_le_ih_k_offset(ih, old_key_comp);
					put_ih_item_len(ih, old_len - sbytes[i]);
					tb->insert_size[0] -= sbytes[i];
				} else {
					/* whole new item falls into S_new[i] */

					/* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */
					leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
							snum[i] - 1, sbytes[i],
							S_new[i]);

					/* Insert new item into S_new[i] */
					bi.tb = tb;
					bi.bi_bh = S_new[i];
					bi.bi_parent = NULL;
					bi.bi_position = 0;
					leaf_insert_into_buf(&bi,
							     item_pos - n + snum[i] - 1,
							     ih, body, zeros_num);

					zeros_num = tb->insert_size[0] = 0;
				}
			} else {
				/* new item or its part doesn't fall into S_new[i] */
				leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
						snum[i], sbytes[i], S_new[i]);
			}
			break;

		case M_PASTE:	/* append item */
			if (n - snum[i] <= item_pos) {
				/* pasted item or part of it falls into S_new[i] */
				if (item_pos == n - snum[i]
				    && sbytes[i] != -1) {
					/* we must shift part of the appended item */
					struct item_head *aux_ih;

					RFALSE(ih, "PAP-12210: ih must be 0");

					if (is_direntry_le_ih
					    (aux_ih = B_N_PITEM_HEAD(tbS0, item_pos))) {
						/* we append to directory item */
						int entry_count;

						entry_count = ih_entry_count(aux_ih);

						if (entry_count - sbytes[i] < pos_in_item
						    && pos_in_item <= entry_count) {
							/* new directory entry falls into S_new[i] */
							RFALSE(!tb->insert_size[0],
							       "PAP-12215: insert_size is already 0");
							RFALSE(sbytes[i] - 1 >= entry_count,
							       "PAP-12220: there are not so many entries (%d), only %d",
							       sbytes[i] - 1, entry_count);

							/*
							 * Shift snum[i]-1 items in whole.
							 * Shift sbytes[i] directory entries
							 * from directory item number snum[i].
							 */
							leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
									snum[i], sbytes[i] - 1,
									S_new[i]);
							/* Paste given directory entry to directory item */
							bi.tb = tb;
							bi.bi_bh = S_new[i];
							bi.bi_parent = NULL;
							bi.bi_position = 0;
							leaf_paste_in_buffer(&bi, 0,
									     pos_in_item -
									     entry_count +
									     sbytes[i] - 1,
									     tb->insert_size[0],
									     body, zeros_num);
							/* paste new directory entry */
							leaf_paste_entries(bi.bi_bh, 0,
									   pos_in_item -
									   entry_count +
									   sbytes[i] - 1, 1,
									   (struct reiserfs_de_head *)body,
									   body + DEH_SIZE,
									   tb->insert_size[0]);
							tb->insert_size[0] = 0;
							pos_in_item++;
						} else {
							/* new directory entry doesn't fall into S_new[i] */
							leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
									snum[i], sbytes[i],
									S_new[i]);
						}
					} else {
						/* regular object */
						int n_shift, n_rem, r_zeros_number;
						const char *r_body;

						RFALSE(pos_in_item !=
						       ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos))
						       || tb->insert_size[0] <= 0,
						       "PAP-12225: item too short or insert_size <= 0");

						/* Calculate number of bytes which must be shifted from appended item */
						n_shift = sbytes[i] - tb->insert_size[0];
						if (n_shift < 0)
							n_shift = 0;
						leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
								snum[i], n_shift, S_new[i]);

						/* Calculate number of bytes which must remain in body after append to S_new[i] */
						n_rem = tb->insert_size[0] - sbytes[i];
						if (n_rem < 0)
							n_rem = 0;

						/* Append part of body into S_new[0] */
						bi.tb = tb;
						bi.bi_bh = S_new[i];
						bi.bi_parent = NULL;
						bi.bi_position = 0;

						if (n_rem > zeros_num) {
							r_zeros_number = 0;
							r_body = body + n_rem - zeros_num;
						} else {
							r_body = body;
							r_zeros_number = zeros_num - n_rem;
							zeros_num -= r_zeros_number;
						}

						leaf_paste_in_buffer(&bi, 0, n_shift,
								     tb->insert_size[0] - n_rem,
								     r_body, r_zeros_number);

						{
							struct item_head *tmp;

							tmp = B_N_PITEM_HEAD(S_new[i], 0);
							if (is_indirect_le_ih(tmp)) {
								set_ih_free_space(tmp, 0);
								set_le_ih_k_offset(tmp,
										   le_ih_k_offset(tmp) +
										   (n_rem <<
										    (tb->tb_sb->s_blocksize_bits -
										     UNFM_P_SHIFT)));
							} else {
								set_le_ih_k_offset(tmp,
										   le_ih_k_offset(tmp) +
										   n_rem);
							}
						}

						tb->insert_size[0] = n_rem;
						if (!n_rem)
							pos_in_item++;
					}
				} else {
					/* item falls wholly into S_new[i] */
					int ret_val;
					struct item_head *pasted;

#ifdef CONFIG_REISERFS_CHECK
					struct item_head *ih =
					    B_N_PITEM_HEAD(tbS0, item_pos);

					if (!is_direntry_le_ih(ih)
					    && (pos_in_item != ih_item_len(ih)
						|| tb->insert_size[0] <= 0))
						reiserfs_panic(tb->tb_sb,
							       "PAP-12235: balance_leaf: pos_in_item must be equal to ih_item_len");
#endif				/* CONFIG_REISERFS_CHECK */

					ret_val = leaf_move_items(LEAF_FROM_S_TO_SNEW,
								  tb, snum[i],
								  sbytes[i],
								  S_new[i]);

					RFALSE(ret_val,
					       "PAP-12240: unexpected value returned by leaf_move_items (%d)",
					       ret_val);

					/* paste into item */
					bi.tb = tb;
					bi.bi_bh = S_new[i];
					bi.bi_parent = NULL;
					bi.bi_position = 0;
					leaf_paste_in_buffer(&bi,
							     item_pos - n + snum[i],
							     pos_in_item,
							     tb->insert_size[0],
							     body, zeros_num);

					pasted = B_N_PITEM_HEAD(S_new[i],
								item_pos - n + snum[i]);
					if (is_direntry_le_ih(pasted)) {
						leaf_paste_entries(bi.bi_bh,
								   item_pos - n + snum[i],
								   pos_in_item, 1,
								   (struct reiserfs_de_head *)body,
								   body + DEH_SIZE,
								   tb->insert_size[0]);
					}

					/* if we paste to indirect item update ih_free_space */
					if (is_indirect_le_ih(pasted))
						set_ih_free_space(pasted, 0);
					zeros_num = tb->insert_size[0] = 0;
				}
			} else {
				/* pasted item doesn't fall into S_new[i] */
				leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
						snum[i], sbytes[i], S_new[i]);
			}
			break;

		default:	/* cases d and t */
			reiserfs_panic(tb->tb_sb,
				       "PAP-12245: balance_leaf: blknum > 2: unexpected mode: %s(%d)",
				       (flag == M_DELETE) ? "DELETE" :
				       ((flag == M_CUT) ? "CUT" : "UNKNOWN"),
				       flag);
		}

		memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE);
		insert_ptr[i] = S_new[i];

		RFALSE(!buffer_journaled(S_new[i])
		       || buffer_journal_dirty(S_new[i])
		       || buffer_dirty(S_new[i]), "PAP-12247: S_new[%d] : (%b)",
		       i, S_new[i]);
	}
	/*
	 * if the affected item was not wholly shifted then we perform all
	 * necessary operations on that part or whole of the affected item
	 * which remains in S
	 */
	if (0 <= item_pos && item_pos < tb->s0num) {
		/* if we must insert or append into buffer S[0] */
		switch (flag) {
		case M_INSERT:	/* insert item into S[0] */
			bi.tb = tb;
			bi.bi_bh = tbS0;
			bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
			bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
			leaf_insert_into_buf(&bi, item_pos, ih, body,
					     zeros_num);

			/* If we insert the first key change the delimiting key */
			if (item_pos == 0) {
				if (tb->CFL[0])	/* can be 0 in reiserfsck */
					replace_key(tb, tb->CFL[0], tb->lkey[0],
						    tbS0, 0);
			}
			break;

		case M_PASTE:{	/* append item in S[0] */
				struct item_head *pasted;

				pasted = B_N_PITEM_HEAD(tbS0, item_pos);
				/* for a directory item, the new entry may already have been pasted */
				if (is_direntry_le_ih(pasted)) {
					if (pos_in_item >= 0 &&
					    pos_in_item <= ih_entry_count(pasted)) {
						RFALSE(!tb->insert_size[0],
						       "PAP-12260: insert_size is 0 already");

						/* prepare space */
						bi.tb = tb;
						bi.bi_bh = tbS0;
						bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
						bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
						leaf_paste_in_buffer(&bi, item_pos,
								     pos_in_item,
								     tb->insert_size[0],
								     body, zeros_num);

						/* paste entry */
						leaf_paste_entries(bi.bi_bh, item_pos,
								   pos_in_item, 1,
								   (struct reiserfs_de_head *)body,
								   body + DEH_SIZE,
								   tb->insert_size[0]);

						if (!item_pos && !pos_in_item) {
							RFALSE(!tb->CFL[0] || !tb->L[0],
							       "PAP-12270: CFL[0]/L[0] must be specified");
							if (tb->CFL[0]) {
								replace_key(tb, tb->CFL[0],
									    tb->lkey[0],
									    tbS0, 0);
							}
						}
						tb->insert_size[0] = 0;
					}
				} else {
					/* regular object */
					if (pos_in_item == ih_item_len(pasted)) {
						RFALSE(tb->insert_size[0] <= 0,
						       "PAP-12275: insert size must not be %d",
						       tb->insert_size[0]);
						bi.tb = tb;
						bi.bi_bh = tbS0;
						bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
						bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
						leaf_paste_in_buffer(&bi, item_pos,
								     pos_in_item,
								     tb->insert_size[0],
								     body, zeros_num);

						if (is_indirect_le_ih(pasted)) {
#if 0
							RFALSE(tb->insert_size[0] != UNFM_P_SIZE,
							       "PAP-12280: insert_size for indirect item must be %d, not %d",
							       UNFM_P_SIZE,
							       tb->insert_size[0]);
#endif
							set_ih_free_space(pasted, 0);
						}
						tb->insert_size[0] = 0;
					}
#ifdef CONFIG_REISERFS_CHECK
					else {
						if (tb->insert_size[0]) {
							print_cur_tb("12285");
							reiserfs_panic(tb->tb_sb,
								       "PAP-12285: balance_leaf: insert_size must be 0 (%d)",
								       tb->insert_size[0]);
						}
					}
#endif				/* CONFIG_REISERFS_CHECK */
				}
			}	/* case M_PASTE: */
		}
	}

#ifdef CONFIG_REISERFS_CHECK
	if (flag == M_PASTE && tb->insert_size[0]) {
		print_cur_tb("12290");
		reiserfs_panic(tb->tb_sb,
			       "PAP-12290: balance_leaf: insert_size is still not 0 (%d)",
			       tb->insert_size[0]);
	}
#endif				/* CONFIG_REISERFS_CHECK */

	return 0;
}				/* Leaf level of the tree is balanced (end of balance_leaf) */
/* Make empty node */
void make_empty_node(struct buffer_info *bi)
{
	struct block_head *blkh;

	RFALSE(bi->bi_bh == NULL, "PAP-12295: pointer to the buffer is NULL");

	blkh = B_BLK_HEAD(bi->bi_bh);
	set_blkh_nr_item(blkh, 0);
	set_blkh_free_space(blkh, MAX_CHILD_SIZE(bi->bi_bh));

	if (bi->bi_parent)
		B_N_CHILD(bi->bi_parent, bi->bi_position)->dc_size = 0;	/* Endian safe if 0 */
}
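
/* get_FEB() below uses this to initialize each freshly taken buffer. */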
/* Get first empty buffer */
struct buffer_head *get_FEB(struct tree_balance *tb)
{
	int i;
	struct buffer_head *first_b;
	struct buffer_info bi;

	for (i = 0; i < MAX_FEB_SIZE; i++)
		if (tb->FEB[i] != 0)
			break;

	if (i == MAX_FEB_SIZE)
		reiserfs_panic(tb->tb_sb,
			       "vs-12300: get_FEB: FEB list is empty");

	bi.tb = tb;
	bi.bi_bh = first_b = tb->FEB[i];
	bi.bi_parent = NULL;
	bi.bi_position = 0;
	make_empty_node(&bi);
	set_buffer_uptodate(first_b);
	tb->FEB[i] = NULL;
	tb->used[i] = first_b;

	return (first_b);
}
/*
 * This is now used because reiserfs_free_block has to be able to
 * schedule.
 */
static void store_thrown(struct tree_balance *tb, struct buffer_head *bh)
{
	int i;

	if (buffer_dirty(bh))
		reiserfs_warning(tb->tb_sb,
				 "store_thrown deals with dirty buffer");
	for (i = 0; i < ARRAY_SIZE(tb->thrown); i++)
		if (!tb->thrown[i]) {
			tb->thrown[i] = bh;
			get_bh(bh);	/* free_thrown puts this */
			return;
		}
	reiserfs_warning(tb->tb_sb, "store_thrown: too many thrown buffers");
}
static void free_thrown(struct tree_balance *tb)
{
	int i;
	b_blocknr_t blocknr;

	for (i = 0; i < ARRAY_SIZE(tb->thrown); i++) {
		if (tb->thrown[i]) {
			blocknr = tb->thrown[i]->b_blocknr;
			if (buffer_dirty(tb->thrown[i]))
				reiserfs_warning(tb->tb_sb,
						 "free_thrown deals with dirty buffer %d",
						 blocknr);
			brelse(tb->thrown[i]);	/* incremented in store_thrown */
			reiserfs_free_block(tb->transaction_handle, NULL,
					    blocknr, 0);
		}
	}
}
void reiserfs_invalidate_buffer(struct tree_balance *tb, struct buffer_head *bh)
{
	struct block_head *blkh;

	blkh = B_BLK_HEAD(bh);
	set_blkh_level(blkh, FREE_LEVEL);
	set_blkh_nr_item(blkh, 0);

	clear_buffer_dirty(bh);
	store_thrown(tb, bh);
}
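
/*
 * Note that the buffer is only queued here via store_thrown(); the
 * actual reiserfs_free_block() call happens later, in free_thrown(),
 * once scheduling is safe again (see the comment above store_thrown()).
 */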
/* Replace n_dest'th key in buffer dest by n_src'th key of buffer src.*/
void replace_key(struct tree_balance *tb, struct buffer_head *dest, int n_dest,
		 struct buffer_head *src, int n_src)
{
	RFALSE(dest == NULL || src == NULL,
	       "vs-12305: source or destination buffer is 0 (src=%p, dest=%p)",
	       src, dest);
	RFALSE(!B_IS_KEYS_LEVEL(dest),
	       "vs-12310: invalid level (%z) for destination buffer. dest must be internal",
	       dest);
	RFALSE(n_dest < 0 || n_src < 0,
	       "vs-12315: src(%d) or dest(%d) key number < 0", n_src, n_dest);
	RFALSE(n_dest >= B_NR_ITEMS(dest) || n_src >= B_NR_ITEMS(src),
	       "vs-12320: src(%d(%d)) or dest(%d(%d)) key number is too big",
	       n_src, B_NR_ITEMS(src), n_dest, B_NR_ITEMS(dest));

	if (B_IS_ITEMS_LEVEL(src))
		/* source buffer contains leaf node */
		memcpy(B_N_PDELIM_KEY(dest, n_dest), B_N_PITEM_HEAD(src, n_src),
		       KEY_SIZE);
	else
		memcpy(B_N_PDELIM_KEY(dest, n_dest), B_N_PDELIM_KEY(src, n_src),
		       KEY_SIZE);

	do_balance_mark_internal_dirty(tb, dest, 0);
}
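
/*
 * Illustrative use (editorial addition): after balance_leaf shifts whole
 * items from S[0] into its left neighbor, the left delimiting key in the
 * common parent must track the new first item of S[0].  A call of this
 * shape appears in the leaf-balancing code; treat the exact context as a
 * hedged sketch rather than code from this file.
 */
#if 0
	replace_key(tb, tb->CFL[0], tb->lkey[0],
		    PATH_PLAST_BUFFER(tb->tb_path), 0);
#endif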
int get_left_neighbor_position(struct tree_balance *tb, int h)
{
	int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);

	RFALSE(PATH_H_PPARENT(tb->tb_path, h) == 0 || tb->FL[h] == 0,
	       "vs-12325: FL[%d](%p) or F[%d](%p) does not exist",
	       h, tb->FL[h], h, PATH_H_PPARENT(tb->tb_path, h));

	if (Sh_position == 0)
		return B_NR_ITEMS(tb->FL[h]);
	else
		return Sh_position - 1;
}
int get_right_neighbor_position(struct tree_balance *tb, int h)
{
	int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);

	RFALSE(PATH_H_PPARENT(tb->tb_path, h) == 0 || tb->FR[h] == 0,
	       "vs-12330: F[%d](%p) or FR[%d](%p) does not exist",
	       h, PATH_H_PPARENT(tb->tb_path, h), h, tb->FR[h]);

	if (Sh_position == B_NR_ITEMS(PATH_H_PPARENT(tb->tb_path, h)))
		return 0;
	else
		return Sh_position + 1;
}
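
/*
 * Worked example (editorial addition): if S[h] is the leftmost child of its
 * parent (Sh_position == 0), the left neighbor hangs off the *last* pointer
 * of FL[h], so its position is B_NR_ITEMS(tb->FL[h]) -- an internal node
 * with n keys has n + 1 child pointers.  Symmetrically, when S[h] is the
 * rightmost child, the right neighbor is child 0 of FR[h].  The check below
 * is a hypothetical sketch, not code from this file.
 */
#if 0
	if (PATH_H_POSITION(tb->tb_path, h + 1) == 0)
		BUG_ON(get_left_neighbor_position(tb, h) !=
		       B_NR_ITEMS(tb->FL[h]));
#endif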
#ifdef CONFIG_REISERFS_CHECK

int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value);

static void check_internal_node(struct super_block *s, struct buffer_head *bh,
				char *mes)
{
	struct disk_child *dc;
	int i;

	RFALSE(!bh, "PAP-12336: bh == 0");

	if (!bh || !B_IS_IN_TREE(bh))
		return;

	RFALSE(!buffer_dirty(bh) &&
	       !(buffer_journaled(bh) || buffer_journal_dirty(bh)),
	       "PAP-12337: buffer (%b) must be dirty", bh);

	dc = B_N_CHILD(bh, 0);

	for (i = 0; i <= B_NR_ITEMS(bh); i++, dc++) {
		if (!is_reusable(s, dc_block_number(dc), 1)) {
			print_cur_tb(mes);
			reiserfs_panic(s,
				       "PAP-12338: check_internal_node: invalid child pointer %y in %b",
				       dc, bh);
		}
	}
}
static int locked_or_not_in_tree(struct buffer_head *bh, char *which)
{
	if ((!buffer_journal_prepared(bh) && buffer_locked(bh)) ||
	    !B_IS_IN_TREE(bh)) {
		reiserfs_warning(NULL,
				 "vs-12339: locked_or_not_in_tree: %s (%b)",
				 which, bh);
		return 1;
	}
	return 0;
}
static int check_before_balancing(struct tree_balance *tb)
{
	int retval = 0;

	if (cur_tb) {
		reiserfs_panic(tb->tb_sb, "vs-12335: check_before_balancing: "
			       "suspect that schedule occurred based on cur_tb not being null at this point in code. "
			       "do_balance cannot properly handle schedule occurring while it runs.");
	}

	/* double check that buffers that we will modify are unlocked.
	   (fix_nodes should already have prepped all of these for us). */
	if (tb->lnum[0]) {
		retval |= locked_or_not_in_tree(tb->L[0], "L[0]");
		retval |= locked_or_not_in_tree(tb->FL[0], "FL[0]");
		retval |= locked_or_not_in_tree(tb->CFL[0], "CFL[0]");
		check_leaf(tb->L[0]);
	}
	if (tb->rnum[0]) {
		retval |= locked_or_not_in_tree(tb->R[0], "R[0]");
		retval |= locked_or_not_in_tree(tb->FR[0], "FR[0]");
		retval |= locked_or_not_in_tree(tb->CFR[0], "CFR[0]");
		check_leaf(tb->R[0]);
	}
	retval |= locked_or_not_in_tree(PATH_PLAST_BUFFER(tb->tb_path), "S[0]");
	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));

	return retval;
}
static void check_after_balance_leaf(struct tree_balance *tb)
{
	if (tb->lnum[0]) {
		if (B_FREE_SPACE(tb->L[0]) !=
		    MAX_CHILD_SIZE(tb->L[0]) -
		    dc_size(B_N_CHILD
			    (tb->FL[0], get_left_neighbor_position(tb, 0)))) {
			print_cur_tb("12221");
			reiserfs_panic(tb->tb_sb,
				       "PAP-12355: check_after_balance_leaf: shift to left was incorrect");
		}
	}
	if (tb->rnum[0]) {
		if (B_FREE_SPACE(tb->R[0]) !=
		    MAX_CHILD_SIZE(tb->R[0]) -
		    dc_size(B_N_CHILD
			    (tb->FR[0], get_right_neighbor_position(tb, 0)))) {
			print_cur_tb("12222");
			reiserfs_panic(tb->tb_sb,
				       "PAP-12360: check_after_balance_leaf: shift to right was incorrect");
		}
	}
	if (PATH_H_PBUFFER(tb->tb_path, 1) &&
	    (B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)) !=
	     (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
	      dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
				PATH_H_POSITION(tb->tb_path, 1)))))) {
		int left = B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0));
		int right = (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
			     dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
					       PATH_H_POSITION(tb->tb_path,
							       1))));
		print_cur_tb("12223");
		reiserfs_warning(tb->tb_sb,
				 "B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) = %d; "
				 "MAX_CHILD_SIZE (%d) - dc_size( %y, %d ) [%d] = %d",
				 left,
				 MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)),
				 PATH_H_PBUFFER(tb->tb_path, 1),
				 PATH_H_POSITION(tb->tb_path, 1),
				 dc_size(B_N_CHILD
					 (PATH_H_PBUFFER(tb->tb_path, 1),
					  PATH_H_POSITION(tb->tb_path, 1))),
				 right);
		reiserfs_panic(tb->tb_sb,
			       "PAP-12365: check_after_balance_leaf: S is incorrect");
	}
}
static void check_leaf_level(struct tree_balance *tb)
{
	check_leaf(tb->L[0]);
	check_leaf(tb->R[0]);
	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));
}
static void check_internal_levels(struct tree_balance *tb)
{
	int h;

	/* check all internal nodes */
	for (h = 1; tb->insert_size[h]; h++) {
		check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h),
				    "BAD BUFFER ON PATH");
		if (tb->lnum[h])
			check_internal_node(tb->tb_sb, tb->L[h], "BAD L");
		if (tb->rnum[h])
			check_internal_node(tb->tb_sb, tb->R[h], "BAD R");
	}
}

#endif
/* Now we have all of the buffers that must be used in balancing of
   the tree.  We rely on the assumption that schedule() will not occur
   while do_balance works.  (Only interrupt handlers are acceptable.)
   We balance the tree according to the analysis made before this,
   using buffers already obtained.  For SMP support it will someday be
   necessary to add ordered locking of tb. */

/* Some interesting rules of balancing:

   we delete a maximum of two nodes per level per balancing: we never
   delete R; when we delete two of the three nodes L, S, R, we move
   them into R.

   we only delete L if we are deleting two nodes; if we delete only
   one node, we delete S.

   if we shift leaves, then we shift as much as we can: this is a
   deliberate policy of extremism in node packing which results in
   higher average utilization after repeated random balance operations
   at the cost of more memory copies and more balancing as a result of
   small insertions to full nodes.

   if we shift internal nodes, we try to evenly balance the node
   utilization, with consequently less balancing at the cost of lower
   utilization.

   one could argue that the policy for directories in leaves should be
   that of internal nodes, but we will wait until another day to
   evaluate this....  It would be nice to someday measure and prove
   these assumptions as to what is optimal....
*/
static inline void do_balance_starts(struct tree_balance *tb)
{
	/* use print_cur_tb() to see initial state of struct
	   tree_balance */

	/* store_print_tb (tb); */

	/* do not delete, just comment it out */
	/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
	   "check");*/
	RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
#ifdef CONFIG_REISERFS_CHECK
	cur_tb = tb;
#endif
}
static inline void do_balance_completed(struct tree_balance *tb)
{

#ifdef CONFIG_REISERFS_CHECK
	check_leaf_level(tb);
	check_internal_levels(tb);
	cur_tb = NULL;
#endif

	/* reiserfs_free_block is no longer schedule safe.  So, we need to
	** put the buffers we want freed on the thrown list during do_balance,
	** and then free them now
	*/

	REISERFS_SB(tb->tb_sb)->s_do_balance++;

	/* release all nodes held to perform the balancing */
	unfix_nodes(tb);

	free_thrown(tb);
}
void do_balance(struct tree_balance *tb,	/* tree_balance structure */
		struct item_head *ih,	/* item header of inserted item */
		const char *body,	/* body of inserted item or bytes to paste */
		int flag)
{				/* i - insert, d - delete
				   c - cut, p - paste

				   Cut means delete part of an item
				   (includes removing an entry from a
				   directory).

				   Delete means delete whole item.

				   Insert means add a new item into the
				   tree.

				   Paste means to append to the end of an
				   existing file or to insert a directory
				   entry.  */
	int child_pos,		/* position of a child node in its parent */
	 h;			/* level of the tree being processed */
	struct item_head insert_key[2];	/* in our processing of one level
					   we sometimes determine what
					   must be inserted into the next
					   higher level.  This insertion
					   consists of a key or two keys
					   and their corresponding
					   pointers */
	struct buffer_head *insert_ptr[2];	/* inserted node-ptrs for the next
						   level */

	tb->tb_mode = flag;
	tb->need_balance_dirty = 0;

	if (FILESYSTEM_CHANGED_TB(tb)) {
		reiserfs_panic(tb->tb_sb,
			       "clm-6000: do_balance, fs generation has changed\n");
	}
	/* if we have no real work to do */
	if (!tb->insert_size[0]) {
		reiserfs_warning(tb->tb_sb,
				 "PAP-12350: do_balance: insert_size == 0, mode == %c",
				 flag);
		unfix_nodes(tb);
		return;
	}

	atomic_inc(&(fs_generation(tb->tb_sb)));
	do_balance_starts(tb);

	/* balance leaf returns 0 except if combining L R and S into
	   one node.  see balance_internal() for explanation of this
	   line of code. */
	child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) +
	    balance_leaf(tb, ih, body, flag, insert_key, insert_ptr);

#ifdef CONFIG_REISERFS_CHECK
	check_after_balance_leaf(tb);
#endif

	/* Balance internal level of the tree. */
	for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++)
		child_pos =
		    balance_internal(tb, h, child_pos, insert_key, insert_ptr);

	do_balance_completed(tb);
}
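
/*
 * Illustrative call sequence (editorial addition): how a typical insert path
 * reaches do_balance().  fix_nodes() and the M_INSERT / CARRY_ON constants
 * belong to the surrounding reiserfs code; the local variables here are a
 * hedged sketch, not code from this file.
 */
#if 0
	struct tree_balance tb;
	struct item_head ih;
	const char *body;

	/* ... search the tree, fill tb.tb_path / tb.insert_size[0] / ih ... */
	if (fix_nodes(M_INSERT, &tb, &ih, body) == CARRY_ON)
		do_balance(&tb, &ih, body, M_INSERT);
	else
		unfix_nodes(&tb);	/* e.g. on REPEAT_SEARCH, retry the lookup */
#endif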