/*
 * linux/fs/hfs/extent.c
 *
 * Copyright (C) 1995-1997 Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains the functions related to the extents B-tree.
 */

#include <linux/pagemap.h>

#include "hfs_fs.h"
#include "btree.h"

/*================ File-local functions ================*/

/*
 * build_key
 */
static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type)
{
	key->key_len = 7;
	key->ext.FkType = type;
	key->ext.FNum = cpu_to_be32(cnid);
	key->ext.FABN = cpu_to_be16(block);
}

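/*
 * Key layout note: an extents key is (FNum, FkType, FABN), i.e. the file's
 * CNID, the fork type, and the file allocation block number at which the
 * record starts.  The key_len of 7 corresponds to the bytes that follow the
 * length byte in the on-disk key (1 for FkType, 4 for FNum, 2 for FABN).
 */
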
/*
 * hfs_ext_keycmp()
 *
 * Description:
 *   This is the comparison function used for the extents B-tree.  In
 *   comparing extent B-tree entries, the file id is the most
 *   significant field (compared as unsigned ints); the fork type is
 *   the second most significant field (compared as unsigned chars);
 *   and the allocation block number field is the least significant
 *   (compared as unsigned shorts).
 * Input Variable(s):
 *   struct hfs_ext_key *key1: pointer to the first key to compare
 *   struct hfs_ext_key *key2: pointer to the second key to compare
 * Output Variable(s):
 *   NONE
 * Returns:
 *   int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
 * Preconditions:
 *   key1 and key2 point to "valid" (struct hfs_ext_key)s.
 * Postconditions:
 *   This function has no side-effects.
 */
int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
{
	__be32 fnum1, fnum2;
	__be16 block1, block2;

	fnum1 = key1->ext.FNum;
	fnum2 = key2->ext.FNum;
	if (fnum1 != fnum2)
		return be32_to_cpu(fnum1) < be32_to_cpu(fnum2) ? -1 : 1;
	if (key1->ext.FkType != key2->ext.FkType)
		return key1->ext.FkType < key2->ext.FkType ? -1 : 1;

	block1 = key1->ext.FABN;
	block2 = key2->ext.FABN;
	if (block1 == block2)
		return 0;
	return be16_to_cpu(block1) < be16_to_cpu(block2) ? -1 : 1;
}

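/*
 * Illustrative ordering produced by hfs_ext_keycmp(), assuming the usual HFS
 * fork type values (HFS_FK_DATA == 0x00, HFS_FK_RSRC == 0xff):
 *
 *   (FNum 16, data fork, FABN 0)
 * < (FNum 16, data fork, FABN 3)
 * < (FNum 16, rsrc fork, FABN 0)
 * < (FNum 17, data fork, FABN 0)
 *
 * so all overflow records of one fork sort together, ordered by the file
 * allocation block at which each record starts.
 */
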
/*
 * hfs_ext_find_block
 *
 * Find a block within an extent record
 */
static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
{
	int i;
	u16 count;

	for (i = 0; i < 3; ext++, i++) {
		count = be16_to_cpu(ext->count);
		if (off < count)
			return be16_to_cpu(ext->block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

static int hfs_ext_block_count(struct hfs_extent *ext)
{
	int i;
	u16 count = 0;

	for (i = 0; i < 3; ext++, i++)
		count += be16_to_cpu(ext->count);
	return count;
}

static u16 hfs_ext_lastblock(struct hfs_extent *ext)
{
	int i;

	ext += 2;
	for (i = 0; i < 2; ext--, i++)
		if (ext->count)
			break;
	return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
}

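/*
 * A worked example for the three helpers above, using a hypothetical record
 * of three (block, count) runs: {100,4} {200,2} {0,0}.
 *
 *   hfs_ext_find_block(rec, 5)  -> 201  (offset 5 is one block into the
 *                                        second run: 200 + 1)
 *   hfs_ext_block_count(rec)    -> 6    (4 + 2 + 0)
 *   hfs_ext_lastblock(rec)      -> 202  (first block past the last
 *                                        non-empty run: 200 + 2)
 */
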
static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
{
	int res;

	hfs_ext_build_key(fd->search_key, inode->i_ino, HFS_I(inode)->cached_start,
			  HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
	res = hfs_brec_find(fd);
	if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
		if (res != -ENOENT)
			return res;
		/* Fail early and avoid ENOSPC during the btree operation */
		res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
		if (res)
			return res;
		hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	} else {
		if (res)
			return res;
		hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents,
				fd->entryoffset, fd->entrylength);
		HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY;
	}
	return 0;
}

int hfs_ext_write_extent(struct inode *inode)
{
	struct hfs_find_data fd;
	int res = 0;

	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
		res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
		if (res)
			return res;
		res = __hfs_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
	return res;
}

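/*
 * Write-back protocol used by the two functions above: HFS_FLG_EXT_DIRTY
 * means cached_extents differs from what is on disk; HFS_FLG_EXT_NEW
 * additionally means the record has never been inserted into the extents
 * tree, so the lookup is expected to return -ENOENT and a fresh record is
 * inserted instead of overwriting an existing one in place.
 */
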
static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent,
					u32 cnid, u32 block, u8 type)
{
	int res;

	hfs_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.FNum = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.FNum != fd->search_key->ext.FNum ||
	    fd->key->ext.FkType != fd->search_key->ext.FkType)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfs_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfs_extent_rec));
	return 0;
}

static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd,
					 struct inode *inode, u32 block)
{
	int res;

	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
		res = __hfs_ext_write_extent(inode, fd);
		if (res)
			return res;
	}

	res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino,
				    block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
	if (!res) {
		HFS_I(inode)->cached_start = be16_to_cpu(fd->key->ext.FABN);
		HFS_I(inode)->cached_blocks = hfs_ext_block_count(HFS_I(inode)->cached_extents);
	} else {
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	}
	return res;
}

static int hfs_ext_read_extent(struct inode *inode, u16 block)
{
	struct hfs_find_data fd;
	int res;

	if (block >= HFS_I(inode)->cached_start &&
	    block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
		return 0;

	res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
	if (!res) {
		res = __hfs_ext_cache_extent(&fd, inode, block);
		hfs_find_exit(&fd);
	}
	return res;
}

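/*
 * The single-record cache used above means sequential access touches the
 * extents tree at most once per overflow record: cached_start is the file
 * allocation block at which cached_extents begins and cached_blocks is the
 * number of allocation blocks it maps, so any block inside
 * [cached_start, cached_start + cached_blocks) is resolved without a
 * B-tree lookup.
 */
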
static void hfs_dump_extent(struct hfs_extent *extent)
{
	int i;

	hfs_dbg(EXTENT, " ");
	for (i = 0; i < 3; i++)
		hfs_dbg_cont(EXTENT, " %u:%u",
			     be16_to_cpu(extent[i].block),
			     be16_to_cpu(extent[i].count));
	hfs_dbg_cont(EXTENT, "\n");
}

static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
			  u16 alloc_block, u16 block_count)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count) {
			start = be16_to_cpu(extent->block);
			if (alloc_block != start + count) {
				if (++i >= 3)
					return -ENOSPC;
				extent++;
				extent->block = cpu_to_be16(alloc_block);
			} else
				block_count += count;
			extent->count = cpu_to_be16(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

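/*
 * hfs_add_extent() appends a freshly allocated run to the record: `offset`
 * is the number of allocation blocks the record already maps, so the scan
 * stops at the run whose end matches it.  If the new run is contiguous with
 * that run the two are merged, otherwise the next free slot is used, and
 * -ENOSPC means all three slots are occupied.
 *
 * A hypothetical example: with the record {100,4} {0,0} {0,0},
 * hfs_add_extent(rec, 4, 104, 2) merges to {100,6} {0,0} {0,0}, while
 * hfs_add_extent(rec, 4, 300, 2) yields {100,4} {300,2} {0,0}.
 */
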
static int hfs_free_extents(struct super_block *sb, struct hfs_extent *extent,
			    u16 offset, u16 block_nr)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be16_to_cpu(extent->block);
		if (count <= block_nr) {
			hfs_clear_vbm_bits(sb, start, count);
			extent->block = 0;
			extent->count = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			hfs_clear_vbm_bits(sb, start + count, block_nr);
			extent->count = cpu_to_be16(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be16_to_cpu(extent->count);
	}
}

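/*
 * hfs_free_extents() releases the last block_nr allocation blocks of the
 * portion of the record that ends at `offset`, walking the runs from back
 * to front and clearing the corresponding volume bitmap bits.
 *
 * A hypothetical example: with the record {100,4} {200,2} {0,0},
 * hfs_free_extents(sb, rec, 6, 3) frees blocks 103, 200 and 201 and leaves
 * the record as {100,3} {0,0} {0,0}.
 */
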
int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
{
	struct hfs_find_data fd;
	u32 total_blocks, blocks, start;
	u32 cnid = be32_to_cpu(file->FlNum);
	struct hfs_extent *extent;
	int res, i;

	if (type == HFS_FK_DATA) {
		total_blocks = be32_to_cpu(file->PyLen);
		extent = file->ExtRec;
	} else {
		total_blocks = be32_to_cpu(file->RPyLen);
		extent = file->RExtRec;
	}
	total_blocks /= HFS_SB(sb)->alloc_blksz;
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 3; i++)
		blocks += be16_to_cpu(extent[i].count);

	res = hfs_free_extents(sb, extent, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	if (res)
		return res;
	do {
		res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type);
		if (res)
			break;
		start = be16_to_cpu(fd.key->ext.FABN);
		hfs_free_extents(sb, extent, total_blocks - start, total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);

	hfs_find_exit(&fd);
	return res;
}

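/*
 * hfs_free_fork() works in two passes: the first extent record, stored
 * inline in the catalog file record, is freed in full (offset and block_nr
 * are both `blocks`), and then any overflow records in the extents tree are
 * looked up from the end of the fork backwards, freed and removed, until no
 * overflow records for this fork remain.
 */
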
/*
 * hfs_get_block
 */
int hfs_get_block(struct inode *inode, sector_t block,
		  struct buffer_head *bh_result, int create)
{
	struct super_block *sb;
	u16 dblock, ablock;
	int res;

	sb = inode->i_sb;

	/* Convert inode block to disk allocation block */
	ablock = (u32)block / HFS_SB(sb)->fs_div;

	if (block >= HFS_I(inode)->fs_blocks) {
		if (!create)
			return 0;
		if (block > HFS_I(inode)->fs_blocks)
			return -EIO;
		if (ablock >= HFS_I(inode)->alloc_blocks) {
			res = hfs_extend_file(inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < HFS_I(inode)->first_blocks) {
		dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock);
		goto done;
	}

	mutex_lock(&HFS_I(inode)->extents_lock);
	res = hfs_ext_read_extent(inode, ablock);
	if (!res)
		dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
					    ablock - HFS_I(inode)->cached_start);
	else {
		mutex_unlock(&HFS_I(inode)->extents_lock);
		return -EIO;
	}
	mutex_unlock(&HFS_I(inode)->extents_lock);

done:
	map_bh(bh_result, sb, HFS_SB(sb)->fs_start +
	       dblock * HFS_SB(sb)->fs_div +
	       (u32)block % HFS_SB(sb)->fs_div);

	if (create) {
		set_buffer_new(bh_result);
		HFS_I(inode)->phys_size += sb->s_blocksize;
		HFS_I(inode)->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
		mark_inode_dirty(inode);
	}
	return 0;
}

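/*
 * Mapping arithmetic used by hfs_get_block() above: an HFS allocation block
 * spans fs_div filesystem blocks (alloc_blksz divided by the sb block size),
 * and fs_start is the filesystem block at which the allocation blocks begin.
 * Assuming, for illustration, a 4096-byte alloc_blksz, a 512-byte sb block
 * size (fs_div == 8) and fs_start == 16, file block 21 lands in allocation
 * block 21 / 8 == 2; if that maps to disk allocation block 7, the buffer is
 * mapped to filesystem block 16 + 7 * 8 + 21 % 8 == 77.
 */
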
int hfs_extend_file(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 start, len, goal;
	int res;

	mutex_lock(&HFS_I(inode)->extents_lock);
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks)
		goal = hfs_ext_lastblock(HFS_I(inode)->first_extents);
	else {
		res = hfs_ext_read_extent(inode, HFS_I(inode)->alloc_blocks);
		if (res)
			goto out;
		goal = hfs_ext_lastblock(HFS_I(inode)->cached_extents);
	}

	len = HFS_I(inode)->clump_blocks;
	start = hfs_vbm_search_free(sb, goal, &len);
	if (!len) {
		res = -ENOSPC;
		goto out;
	}

	hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
		if (!HFS_I(inode)->first_blocks) {
			hfs_dbg(EXTENT, "first extents\n");
			/* no extents yet */
			HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
			HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfs_add_extent(HFS_I(inode)->first_extents,
					     HFS_I(inode)->alloc_blocks,
					     start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks += len;
		}
	} else {
		res = hfs_add_extent(HFS_I(inode)->cached_extents,
				     HFS_I(inode)->alloc_blocks -
				     HFS_I(inode)->cached_start,
				     start, len);
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->cached_extents);
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			HFS_I(inode)->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	mutex_unlock(&HFS_I(inode)->extents_lock);
	if (!res) {
		HFS_I(inode)->alloc_blocks += len;
		mark_inode_dirty(inode);
		if (inode->i_ino < HFS_FIRSTUSER_CNID)
			set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags);
		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
		hfs_mark_mdb_dirty(sb);
	}
	return res;

insert_extent:
	hfs_dbg(EXTENT, "insert new extent\n");
	res = hfs_ext_write_extent(inode);
	if (res)
		goto out;

	memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
	HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
	HFS_I(inode)->cached_extents[0].count = cpu_to_be16(len);
	hfs_dump_extent(HFS_I(inode)->cached_extents);
	HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW;
	HFS_I(inode)->cached_start = HFS_I(inode)->alloc_blocks;
	HFS_I(inode)->cached_blocks = len;

	res = 0;
	goto out;
}

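/*
 * Growth strategy of hfs_extend_file(): the allocation goal is the block
 * just past the fork's current last extent, so new space is contiguous
 * whenever the volume bitmap allows, and up to clump_blocks are taken in
 * one go.  The new run is appended to the in-inode record while the fork
 * still fits there, then to the cached overflow record, and only when both
 * are full is the cached record flushed and a fresh overflow record started
 * (the insert_extent path).
 */
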
void hfs_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;
	u16 blk_cnt, alloc_cnt, start;
	u32 size;
	int res;

	hfs_dbg(INODE, "truncate: %lu, %Lu -> %Lu\n",
		inode->i_ino, (long long)HFS_I(inode)->phys_size,
		inode->i_size);
	if (inode->i_size > HFS_I(inode)->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		void *fsdata;
		struct page *page;

		/* XXX: Can use generic_cont_expand? */
		size = inode->i_size - 1;
		res = pagecache_write_begin(NULL, mapping, size+1, 0, 0,
					    &page, &fsdata);
		if (!res) {
			res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
						  page, fsdata);
		}
		if (res)
			inode->i_size = HFS_I(inode)->phys_size;
		return;
	} else if (inode->i_size == HFS_I(inode)->phys_size)
		return;

	size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1;
	blk_cnt = size / HFS_SB(sb)->alloc_blksz;
	alloc_cnt = HFS_I(inode)->alloc_blocks;

	if (blk_cnt == alloc_cnt)
		goto out;

	mutex_lock(&HFS_I(inode)->extents_lock);
	res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	if (res) {
		mutex_unlock(&HFS_I(inode)->extents_lock);
		/* XXX: We lack error handling of hfs_file_truncate() */
		return;
	}
	while (1) {
		if (alloc_cnt == HFS_I(inode)->first_blocks) {
			hfs_free_extents(sb, HFS_I(inode)->first_extents,
					 alloc_cnt, alloc_cnt - blk_cnt);
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks = blk_cnt;
			break;
		}
		res = __hfs_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = HFS_I(inode)->cached_start;
		hfs_free_extents(sb, HFS_I(inode)->cached_extents,
				 alloc_cnt - start, alloc_cnt - blk_cnt);
		hfs_dump_extent(HFS_I(inode)->cached_extents);
		if (blk_cnt > start) {
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	mutex_unlock(&HFS_I(inode)->extents_lock);

	HFS_I(inode)->alloc_blocks = blk_cnt;
out:
	HFS_I(inode)->phys_size = inode->i_size;
	HFS_I(inode)->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
			sb->s_blocksize_bits;
	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
	mark_inode_dirty(inode);
}
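
/*
 * hfs_file_truncate() above handles both directions: growing a file is done
 * with a zero-length pagecache write at the new end of file, which lets the
 * normal write path allocate the intervening blocks, while shrinking rounds
 * the new size up to whole allocation blocks and frees extents from the end
 * of the fork backwards, removing whole overflow records from the extents
 * tree until the target allocation-block count is reached.
 */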