itree_common.c
// SPDX-License-Identifier: GPL-2.0
/* Generic part */
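
/*
 * This "generic part" is meant to be #included by a version-specific
 * file that supplies block_t, DEPTH, DIRECT, i_data() and the
 * block_to_cpu()/cpu_to_block() helpers, so nothing below depends on
 * a particular on-disk pointer width.
 *
 * An Indirect records one step on the path from the inode to a data
 * block: @p points at the on-disk pointer slot (in i_data or inside an
 * indirect block's buffer), @key caches the value read through @p, and
 * @bh is the buffer holding the slot (NULL for the inode-embedded
 * array).  pointers_lock serializes pointer updates against readers
 * walking a chain.
 */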
typedef struct {
	block_t	*p;
	block_t	key;
	struct buffer_head *bh;
} Indirect;

static DEFINE_RWLOCK(pointers_lock);
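
/*
 * Record the next link in a chain: remember where the pointer slot
 * lives, snapshot its current value into ->key, and keep the buffer
 * it sits in.
 */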
static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
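
/*
 * A chain is valid as long as every cached ->key still matches what is
 * on disk; a concurrent truncate may have changed a pointer under us.
 * Returns 1 if the whole [from, to] range is unchanged.
 */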
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}
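
/* One past the last pointer slot in an indirect block's buffer. */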
static inline block_t *block_end(struct buffer_head *bh)
{
	return (block_t *)((char*)bh->b_data + bh->b_size);
}
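
/*
 * Walk the indirect chain for a block.  Returns NULL when the full
 * depth was resolved (chain[depth-1].key is then the data block), or a
 * pointer to the first Indirect whose key is zero (a hole) or could
 * not be read.  *err is 0 for a hole, -EIO for a read failure, and
 * -EAGAIN if a concurrent truncate invalidated the chain mid-walk.
 */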
static inline Indirect *get_branch(struct inode *inode,
				   int depth,
				   int *offsets,
				   Indirect chain[DEPTH],
				   int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
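
/*
 * Allocate @num new blocks and wire them into a private branch:
 * branch[0] gets the top block, and each following level is
 * zero-filled and made to point at the next.  The branch is not yet
 * visible to the rest of the tree.  On failure everything allocated so
 * far is released and -ENOSPC or -ENOMEM is returned.
 */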
static int alloc_branch(struct inode *inode,
			int num,
			int *offsets,
			Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);
	int err = -ENOSPC;

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		if (!bh) {
			minix_free_block(inode, nr);
			err = -ENOMEM;
			break;
		}
		lock_buffer(bh);
		memset(bh->b_data, 0, bh->b_size);
		branch[n].bh = bh;
		branch[n].p = (block_t*)bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return err;
}
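
/*
 * Make the new branch visible: under the write lock, re-check that the
 * splice point is still there and still a hole, then store the top
 * block's key into it.  On -EAGAIN (we lost a race with truncate) the
 * freshly allocated blocks are given back.
 */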
static inline int splice_branch(struct inode *inode,
				Indirect chain[DEPTH],
				Indirect *where,
				int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */
	inode->i_ctime = current_time(inode);

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}
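
/*
 * The generic get_block: map @block, allocating the missing part of
 * the branch when @create is set.  A plain lookup miss returns 0 with
 * the bh left unmapped; losing a race against truncate simply retries
 * from the top.
 */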
static int get_block(struct inode *inode, sector_t block,
		     struct buffer_head *bh, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}
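
/* True if every pointer in [p, q) is zero. */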
static inline int all_zeroes(block_t *p, block_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
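
/*
 * Find the point where truncate must split the tree: walk down to the
 * deepest indirect block that still carries live pointers below the
 * new end of file.  Under the write lock the doomed subtree is
 * detached (its root handed back in *top, or the split pointer backed
 * up so the caller's sweep covers it), which makes concurrent
 * get_block walks fail verify_chain() and retry.
 */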
static Indirect *find_shared(struct inode *inode,
			     int depth,
			     int offsets[DEPTH],
			     Indirect chain[DEPTH],
			     block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p = partial; p > chain && all_zeroes((block_t*)p->bh->b_data, p->p); p--)
		;
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
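
/*
 * Free a contiguous run of data block pointers, clearing each slot
 * before handing the block back to the allocator.
 */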
static inline void free_data(struct inode *inode, block_t *p, block_t *q)
{
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = block_to_cpu(*p);
		if (nr) {
			*p = 0;
			minix_free_block(inode, nr);
		}
	}
}
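
/*
 * Recursively free whole subtrees: at depth 0 the pointers in [p, q)
 * are data blocks, otherwise each one is an indirect block whose
 * contents are freed first.
 */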
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
	struct buffer_head *bh;
	unsigned long nr;

	if (depth--) {
		for ( ; p < q ; p++) {
			nr = block_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			if (!bh)
				continue;
			free_branches(inode, (block_t*)bh->b_data,
				      block_end(bh), depth);
			bforget(bh);
			minix_free_block(inode, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}
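
/*
 * Shrink the tree to the new i_size: zero the tail of the last partial
 * block, free the tail of the direct area or the branch split off by
 * find_shared(), then kill every remaining wholly-unneeded indirect
 * subtree.
 */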
static inline void truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	iblock = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}

	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
			      (chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse(partial->bh);
		partial--;
	}

do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}
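
/*
 * Estimate how many filesystem blocks (data plus indirect overhead)
 * are needed to hold @size bytes: start from the data block count,
 * then add ceil(remaining / pointers-per-block) once for each level
 * of indirection.
 */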
static inline unsigned nblocks(loff_t size, struct super_block *sb)
{
	int k = sb->s_blocksize_bits - 10;
	unsigned blocks, res, direct = DIRECT, i = DEPTH;

	blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k);
	res = blocks;
	while (--i && blocks > direct) {
		blocks -= direct;
		blocks += sb->s_blocksize/sizeof(block_t) - 1;
		blocks /= sb->s_blocksize/sizeof(block_t);
		res += blocks;
		direct = 1;
	}
	return res;
}