// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/*
 * Initial source code of clump size calculation is gotten
 * from http://opensource.apple.com/tarballs/diskdev_cmds/
 */
#define CLUMP_ENTRIES	15

static short clumptbl[CLUMP_ENTRIES * 3] = {
/*
 *          Volume       Attributes      Catalog        Extents
 *           Size        Clump (MB)     Clump (MB)     Clump (MB)
 */
        /*   1GB */        4,              4,             4,
        /*   2GB */        6,              6,             4,
        /*   4GB */        8,              8,             4,
        /*   8GB */       11,             11,             5,
        /*
         * For volumes 16GB and larger, we want to make sure that a full OS
         * install won't require fragmentation of the Catalog or Attributes
         * B-trees.  We do this by making the clump sizes sufficiently large,
         * and by leaving a gap after the B-trees for them to grow into.
         *
         * For SnowLeopard 10A298, a FullNetInstall with all packages selected
         * results in:
         * Catalog B-tree Header
         *      nodeSize:          8192
         *      totalNodes:       31616
         *      freeNodes:         1978
         * (used = 231.55 MB)
         * Attributes B-tree Header
         *      nodeSize:          8192
         *      totalNodes:       63232
         *      freeNodes:          958
         * (used = 486.52 MB)
         *
         * We also want Time Machine backup volumes to have a sufficiently
         * large clump size to reduce fragmentation.
         *
         * The series of numbers for Catalog and Attribute form a geometric
         * series.  For Catalog (16GB to 512GB), each term is 8**(1/5) times
         * the previous term.  For Attributes (16GB to 512GB), each term is
         * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
         * 2**(1/5) times the previous term.
         */
        /*  16GB */       64,             32,             5,
        /*  32GB */       84,             49,             6,
        /*  64GB */      111,             74,             7,
        /* 128GB */      147,            111,             8,
        /* 256GB */      194,            169,             9,
        /* 512GB */      256,            256,            11,
        /*   1TB */      294,            294,            14,
        /*   2TB */      338,            338,            16,
        /*   4TB */      388,            388,            20,
        /*   8TB */      446,            446,            25,
        /*  16TB */      512,            512,            32
};

u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
                                  u64 sectors, int file_id)
{
        u32 mod = max(node_size, block_size);
        u32 clump_size;
        int column;
        int i;

        /* Figure out which column of the above table to use for this file. */
        switch (file_id) {
        case HFSPLUS_ATTR_CNID:
                column = 0;
                break;
        case HFSPLUS_CAT_CNID:
                column = 1;
                break;
        default:
                column = 2;
                break;
        }

        /*
         * The default clump size is 0.8% of the volume size. And
         * it must also be a multiple of the node and block size.
         */
        if (sectors < 0x200000) {
                clump_size = sectors << 2;      /*  0.8 %  */
                if (clump_size < (8 * node_size))
                        clump_size = 8 * node_size;
        } else {
                /* turn exponent into table index... */
                for (i = 0, sectors = sectors >> 22;
                     sectors && (i < CLUMP_ENTRIES - 1);
                     ++i, sectors = sectors >> 1)
                        /* empty body */;

                clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
        }
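
        /*
         * Worked example (assuming @sectors counts 512-byte sectors): a 64GB
         * volume has 2^27 sectors, so sectors >> 22 == 32 and the loop above
         * stops at i == 6, the "64GB" row; the Catalog column then yields a
         * 74MB clump before the rounding below.
         */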

        /*
         * Round the clump size to a multiple of node and block size.
         * NOTE: This rounds down.
         */
        clump_size /= mod;
        clump_size *= mod;

        /*
         * Rounding down could have rounded down to 0 if the block size was
         * greater than the clump size.  If so, just use one block or node.
         */
        if (clump_size == 0)
                clump_size = mod;

        return clump_size;
}

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
        struct hfs_btree *tree;
        struct hfs_btree_header_rec *head;
        struct address_space *mapping;
        struct inode *inode;
        struct page *page;
        unsigned int size;

        tree = kzalloc(sizeof(*tree), GFP_KERNEL);
        if (!tree)
                return NULL;

        mutex_init(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);
        tree->sb = sb;
        tree->cnid = id;
        inode = hfsplus_iget(sb, id);
        if (IS_ERR(inode))
                goto free_tree;
        tree->inode = inode;

        if (!HFSPLUS_I(tree->inode)->first_blocks) {
                pr_err("invalid btree extent records (0 size)\n");
                goto free_inode;
        }

        mapping = tree->inode->i_mapping;
        page = read_mapping_page(mapping, 0, NULL);
        if (IS_ERR(page))
                goto free_inode;

        /* Load the header */
        head = (struct hfs_btree_header_rec *)(kmap(page) +
                sizeof(struct hfs_bnode_desc));
        tree->root = be32_to_cpu(head->root);
        tree->leaf_count = be32_to_cpu(head->leaf_count);
        tree->leaf_head = be32_to_cpu(head->leaf_head);
        tree->leaf_tail = be32_to_cpu(head->leaf_tail);
        tree->node_count = be32_to_cpu(head->node_count);
        tree->free_nodes = be32_to_cpu(head->free_nodes);
        tree->attributes = be32_to_cpu(head->attributes);
        tree->node_size = be16_to_cpu(head->node_size);
        tree->max_key_len = be16_to_cpu(head->max_key_len);
        tree->depth = be16_to_cpu(head->depth);

        /* Verify the tree and set the correct compare function */
        switch (id) {
        case HFSPLUS_EXT_CNID:
                if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
                        pr_err("invalid extent max_key_len %d\n",
                                tree->max_key_len);
                        goto fail_page;
                }
                if (tree->attributes & HFS_TREE_VARIDXKEYS) {
                        pr_err("invalid extent btree flag\n");
                        goto fail_page;
                }

                tree->keycmp = hfsplus_ext_cmp_key;
                break;
        case HFSPLUS_CAT_CNID:
                if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
                        pr_err("invalid catalog max_key_len %d\n",
                                tree->max_key_len);
                        goto fail_page;
                }
                if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
                        pr_err("invalid catalog btree flag\n");
                        goto fail_page;
                }

                if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
                    (head->key_type == HFSPLUS_KEY_BINARY))
                        tree->keycmp = hfsplus_cat_bin_cmp_key;
                else {
                        tree->keycmp = hfsplus_cat_case_cmp_key;
                        set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
                }
                break;
        case HFSPLUS_ATTR_CNID:
                if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
                        pr_err("invalid attributes max_key_len %d\n",
                                tree->max_key_len);
                        goto fail_page;
                }
                tree->keycmp = hfsplus_attr_bin_cmp_key;
                break;
        default:
                pr_err("unknown B*Tree requested\n");
                goto fail_page;
        }

        if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
                pr_err("invalid btree flag\n");
                goto fail_page;
        }

        size = tree->node_size;
        if (!is_power_of_2(size))
                goto fail_page;
        if (!tree->node_count)
                goto fail_page;

        tree->node_size_shift = ffs(size) - 1;

        tree->pages_per_bnode =
                (tree->node_size + PAGE_SIZE - 1) >>
                PAGE_SHIFT;

        kunmap(page);
        put_page(page);
        return tree;

 fail_page:
        put_page(page);
 free_inode:
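        /*
         * Switch back to the default aops before the final iput; hfsplus_iget()
         * is assumed to have installed the btree-specific aops for this inode.
         */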
        tree->inode->i_mapping->a_ops = &hfsplus_aops;
        iput(tree->inode);
 free_tree:
        kfree(tree);
        return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
        struct hfs_bnode *node;
        int i;

        if (!tree)
                return;

        for (i = 0; i < NODE_HASH_SIZE; i++) {
                while ((node = tree->node_hash[i])) {
                        tree->node_hash[i] = node->next_hash;
                        if (atomic_read(&node->refcnt))
                                pr_crit("node %d:%d "
                                                "still has %d user(s)!\n",
                                        node->tree->cnid, node->this,
                                        atomic_read(&node->refcnt));
                        hfs_bnode_free(node);
                        tree->node_hash_cnt--;
                }
        }
        iput(tree->inode);
        kfree(tree);
}

int hfs_btree_write(struct hfs_btree *tree)
{
        struct hfs_btree_header_rec *head;
        struct hfs_bnode *node;
        struct page *page;

        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                /* panic? */
                return -EIO;

        /* Load the header */
        page = node->page[0];
        head = (struct hfs_btree_header_rec *)(kmap(page) +
                sizeof(struct hfs_bnode_desc));

        head->root = cpu_to_be32(tree->root);
        head->leaf_count = cpu_to_be32(tree->leaf_count);
        head->leaf_head = cpu_to_be32(tree->leaf_head);
        head->leaf_tail = cpu_to_be32(tree->leaf_tail);
        head->node_count = cpu_to_be32(tree->node_count);
        head->free_nodes = cpu_to_be32(tree->free_nodes);
        head->attributes = cpu_to_be32(tree->attributes);
        head->depth = cpu_to_be16(tree->depth);

        kunmap(page);
        set_page_dirty(page);
        hfs_bnode_put(node);
        return 0;
}
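
/*
 * Grow the allocation bitmap: create a fresh map node at @idx, link it
 * after @prev in the map-node chain, and initialize its single map record.
 */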
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
        struct hfs_btree *tree = prev->tree;
        struct hfs_bnode *node;
        struct hfs_bnode_desc desc;
        __be32 cnid;

        node = hfs_bnode_create(tree, idx);
        if (IS_ERR(node))
                return node;

        tree->free_nodes--;
        prev->next = idx;
        cnid = cpu_to_be32(idx);
        hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

        node->type = HFS_NODE_MAP;
        node->num_recs = 1;
        hfs_bnode_clear(node, 0, tree->node_size);
        desc.next = 0;
        desc.prev = 0;
        desc.type = HFS_NODE_MAP;
        desc.height = 0;
        desc.num_recs = cpu_to_be16(1);
        desc.reserved = 0;
        hfs_bnode_write(node, &desc, 0, sizeof(desc));
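        /*
         * The first bit of the new map record covers this map node itself;
         * set it (0x8000) so the node is marked as in use.  The two u16s at
         * the end of the node form the record offset table: the map record
         * starts at offset 14 and free space starts at node_size - 6.
         */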
        hfs_bnode_write_u16(node, 14, 0x8000);
        hfs_bnode_write_u16(node, tree->node_size - 2, 14);
        hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

        return node;
}

/* Make sure @tree has enough space for the @rsvd_nodes */
int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
        struct inode *inode = tree->inode;
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        u32 count;
        int res;

        if (rsvd_nodes <= 0)
                return 0;
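
        /*
         * Extend the B-tree file one allocation at a time until it holds at
         * least @rsvd_nodes free nodes, keeping the inode's size bookkeeping
         * and the tree's node counters in sync with the new length.
         */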
        while (tree->free_nodes < rsvd_nodes) {
                res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
                if (res)
                        return res;
                hip->phys_size = inode->i_size =
                        (loff_t)hip->alloc_blocks <<
                                HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
                hip->fs_blocks =
                        hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
                tree->free_nodes += count - tree->node_count;
                tree->node_count = count;
        }

        return 0;
}
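
/*
 * Allocate a node from the B-tree's allocation bitmap: find the first clear
 * bit in the map records (starting with the header node's map record and
 * following the chain of map nodes), set it, and return the new bnode.
 */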
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
        unsigned off;
        u16 off16;
        u16 len;
        u8 *data, byte, m;
        int i, res;

        res = hfs_bmap_reserve(tree, 1);
        if (res)
                return ERR_PTR(res);

        nidx = 0;
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;
        len = hfs_brec_lenoff(node, 2, &off16);
        off = off16;

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_SHIFT);
        data = kmap(*pagep);
        off &= ~PAGE_MASK;
        idx = 0;
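
        /*
         * Scan the current map record byte by byte; @idx tracks the node
         * index of the first bit in the current byte.  When a byte has a
         * clear bit, claim it; when the record is exhausted, move on to the
         * next map node (creating one if the chain ends).
         */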
        for (;;) {
                while (len) {
                        byte = data[off];
                        if (byte != 0xff) {
                                for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
                                        if (!(byte & m)) {
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
                                                kunmap(*pagep);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
                                                return hfs_bnode_create(tree,
                                                        idx);
                                        }
                                }
                        }
                        if (++off >= PAGE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
                kunmap(*pagep);
                nidx = node->next;
                if (!nidx) {
                        hfs_dbg(BNODE_MOD, "create new bmap node\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
                hfs_bnode_put(node);
                if (IS_ERR(next_node))
                        return next_node;
                node = next_node;

                len = hfs_brec_lenoff(node, 0, &off16);
                off = off16;
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_SHIFT);
                data = kmap(*pagep);
                off &= ~PAGE_MASK;
        }
}
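
/*
 * Free a node back to the allocation bitmap: walk the map records until we
 * reach the one covering @node->this, then clear the corresponding bit.
 */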
void hfs_bmap_free(struct hfs_bnode *node)
{
        struct hfs_btree *tree;
        struct page *page;
        u16 off, len;
        u32 nidx;
        u8 *data, byte, m;

        hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
        BUG_ON(!node->this);
        tree = node->tree;
        nidx = node->this;
        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                return;
        len = hfs_brec_lenoff(node, 2, &off);
        while (nidx >= len * 8) {
                u32 i;

                nidx -= len * 8;
                i = node->next;
                if (!i) {
                        /* panic */;
                        pr_crit("unable to free bnode %u. "
                                        "bmap not found!\n",
                                node->this);
                        hfs_bnode_put(node);
                        return;
                }
                hfs_bnode_put(node);
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
                if (node->type != HFS_NODE_MAP) {
                        /* panic */;
                        pr_crit("invalid bmap found! "
                                        "(%u,%d)\n",
                                node->this, node->type);
                        hfs_bnode_put(node);
                        return;
                }
                len = hfs_brec_lenoff(node, 0, &off);
        }
        off += node->page_offset + nidx / 8;
        page = node->page[off >> PAGE_SHIFT];
        data = kmap(page);
        off &= ~PAGE_MASK;
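        /*
         * Bits are numbered from the most significant bit of each byte, so
         * node @nidx maps to bit (7 - (nidx & 7)) within its byte.
         */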
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
                pr_crit("trying to free free bnode "
                                "%u(%d)\n",
                        node->this, node->type);
                kunmap(page);
                hfs_bnode_put(node);
                return;
        }
        data[off] = byte & ~m;
        set_page_dirty(page);
        kunmap(page);
        hfs_bnode_put(node);
        tree->free_nodes++;
        mark_inode_dirty(tree->inode);
}