// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Compare two extents keys, returns 0 on same, pos/neg for difference */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
			const hfsplus_btree_key *k2)
{
	__be32 k1id, k2id;
	__be32 k1s, k2s;

	k1id = k1->ext.cnid;
	k2id = k2->ext.cnid;
	if (k1id != k2id)
		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
	if (k1s == k2s)
		return 0;
	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}

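/*
 * Fill in an extents-tree search key for the given file (cnid), fork
 * type (data or resource) and starting allocation block within the fork.
 */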
static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}

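/*
 * Map a block offset within an eight-slot extent record to its on-disk
 * allocation block.  Returns 0 if the offset lies beyond the record.
 */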
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
	int i;
	u32 count;

	for (i = 0; i < 8; ext++, i++) {
		count = be32_to_cpu(ext->block_count);
		if (off < count)
			return be32_to_cpu(ext->start_block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

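/* Return the total number of allocation blocks covered by a record. */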
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
	int i;
	u32 count = 0;

	for (i = 0; i < 8; ext++, i++)
		count += be32_to_cpu(ext->block_count);
	return count;
}

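/*
 * Return the allocation block just past the end of the last non-empty
 * extent in the record, used as the goal for a contiguous allocation.
 */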
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
	int i;

	ext += 7;
	for (i = 0; i < 7; ext--, i++)
		if (ext->block_count)
			break;
	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}

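/*
 * Write the cached extent record back into the extents tree: insert a
 * new record if it never existed on disk (HFSPLUS_EXT_NEW), otherwise
 * overwrite the existing record in place.  Caller holds extents_lock.
 */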
static int __hfsplus_ext_write_extent(struct inode *inode,
				      struct hfs_find_data *fd)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
			      HFSPLUS_IS_RSRC(inode) ?
				HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);

	res = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (hip->extent_state & HFSPLUS_EXT_NEW) {
		if (res != -ENOENT)
			return res;
		/* Fail early and avoid ENOSPC during the btree operation */
		res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
		if (res)
			return res;
		hfs_brec_insert(fd, hip->cached_extents,
				sizeof(hfsplus_extent_rec));
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	} else {
		if (res)
			return res;
		hfs_bnode_write(fd->bnode, hip->cached_extents,
				fd->entryoffset, fd->entrylength);
		hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
	}

	/*
	 * We can't just use hfsplus_mark_inode_dirty here, because we
	 * also get called from hfsplus_write_inode, which should not
	 * redirty the inode.  Instead the callers have to be careful
	 * to explicitly mark the inode dirty, too.
	 */
	set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);
	return 0;
}

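/*
 * Write back the cached extent record if it is dirty; the caller must
 * already hold extents_lock.
 */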
static int hfsplus_ext_write_extent_locked(struct inode *inode)
{
	int res = 0;

	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
		struct hfs_find_data fd;

		res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
		if (res)
			return res;
		res = __hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
	return res;
}

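/* Take extents_lock and flush the cached extent record if dirty. */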
int hfsplus_ext_write_extent(struct inode *inode)
{
	int res;

	mutex_lock(&HFSPLUS_I(inode)->extents_lock);
	res = hfsplus_ext_write_extent_locked(inode);
	mutex_unlock(&HFSPLUS_I(inode)->extents_lock);
	return res;
}

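/*
 * Look up the extents-tree record covering 'block' of the given fork
 * and read it into 'extent'.  fd->key->ext.cnid is cleared up front so
 * that a lookup which positions on an unrelated record fails the key
 * comparison below with -ENOENT.
 */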
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset,
		sizeof(hfsplus_extent_rec));
	return 0;
}

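/*
 * Load the extent record covering 'block' into the inode's cache,
 * writing back the previously cached record first if it is dirty.
 * Caller holds extents_lock.
 */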
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
		struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	if (hip->extent_state & HFSPLUS_EXT_DIRTY) {
		res = __hfsplus_ext_write_extent(inode, fd);
		if (res)
			return res;
	}

	res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ?
						HFSPLUS_TYPE_RSRC :
						HFSPLUS_TYPE_DATA);
	if (!res) {
		hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
		hip->cached_blocks =
			hfsplus_ext_block_count(hip->cached_extents);
	} else {
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	return res;
}

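/* Ensure the cached extent record covers 'block', reading it if needed. */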
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	int res;

	if (block >= hip->cached_start &&
	    block < hip->cached_start + hip->cached_blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
	if (!res) {
		res = __hfsplus_ext_cache_extent(&fd, inode, block);
		hfs_find_exit(&fd);
	}
	return res;
}

/* Get a block at iblock for inode, possibly allocating if create */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res = -EIO;
	u32 ablock, dblock, mask;
	sector_t sector;
	int was_dirty = 0;

	/* Convert inode block to disk allocation block */
	ablock = iblock >> sbi->fs_shift;

	if (iblock >= hip->fs_blocks) {
		if (!create)
			return 0;
		if (iblock > hip->fs_blocks)
			return -EIO;
		if (ablock >= hip->alloc_blocks) {
			res = hfsplus_file_extend(inode, false);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < hip->first_blocks) {
		dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
		goto done;
	}

	if (inode->i_ino == HFSPLUS_EXT_CNID)
		return -EIO;

	mutex_lock(&hip->extents_lock);

	/*
	 * hfsplus_ext_read_extent will write out a cached extent into
	 * the extents btree.  In that case we may have to mark the inode
	 * dirty even for a pure read of an extent here.
	 */
	was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
	res = hfsplus_ext_read_extent(inode, ablock);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		return -EIO;
	}
	dblock = hfsplus_ext_find_block(hip->cached_extents,
					ablock - hip->cached_start);
	mutex_unlock(&hip->extents_lock);

done:
	hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n",
		inode->i_ino, (long long)iblock, dblock);

	mask = (1 << sbi->fs_shift) - 1;
	sector = ((sector_t)dblock << sbi->fs_shift) +
		  sbi->blockoffset + (iblock & mask);
	map_bh(bh_result, sb, sector);

	if (create) {
		set_buffer_new(bh_result);
		hip->phys_size += sb->s_blocksize;
		hip->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
	}
	if (create || was_dirty)
		mark_inode_dirty(inode);
	return 0;
}

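/* Dump all eight start:count pairs of an extent record for debugging. */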
static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
	int i;

	hfs_dbg(EXTENT, "   ");
	for (i = 0; i < 8; i++)
		hfs_dbg_cont(EXTENT, " %u:%u",
			     be32_to_cpu(extent[i].start_block),
			     be32_to_cpu(extent[i].block_count));
	hfs_dbg_cont(EXTENT, "\n");
}

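/*
 * Record a new allocation of 'block_count' blocks at 'alloc_block',
 * 'offset' blocks into the record: extend the preceding extent if the
 * allocation is contiguous with it, otherwise start a new slot.
 * Returns -ENOSPC once all eight slots are in use.
 */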
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

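/*
 * Free the last 'block_nr' allocation blocks of the region that ends
 * 'offset' blocks into the record, clearing or trimming slots from
 * back to front.  Only the last error from hfsplus_block_free() is
 * returned.
 */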
static int hfsplus_free_extents(struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;
	int err = 0;

	/* Mapping the allocation file may lock the extent tree */
	WARN_ON(mutex_is_locked(&HFSPLUS_SB(sb)->ext_tree->tree_lock));

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			err = hfsplus_block_free(sb, start, count);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			err = hfsplus_block_free(sb, start + count, block_nr);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i) {
			/*
			 * Try to free all extents and
			 * return only last error
			 */
			return err;
		}
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}

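/*
 * Free all allocation blocks of a fork: first the extents held in the
 * catalog record itself, then any overflow records in the extents
 * tree, walking backwards from the end of the fork.
 */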
int hfsplus_free_fork(struct super_block *sb, u32 cnid,
		      struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res)
		return res;
	do {
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfs_brec_remove(&fd);

		mutex_unlock(&fd.tree->tree_lock);
		hfsplus_free_extents(sb, ext_entry, total_blocks - start,
				     total_blocks);
		total_blocks = start;
		mutex_lock(&fd.tree->tree_lock);
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}

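/*
 * Grow the fork by up to one clump of allocation blocks, preferring
 * blocks contiguous with its current last extent.  The new extent is
 * merged into the in-inode record or the cached overflow record; when
 * neither has room, a fresh overflow record is started (insert_extent).
 */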
int hfsplus_file_extend(struct inode *inode, bool zeroout)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 start, len, goal;
	int res;

	if (sbi->alloc_file->i_size * 8 <
	    sbi->total_blocks - sbi->free_blocks + 8) {
		/* extend alloc file */
		pr_err("extend alloc file! (%llu,%u,%u)\n",
		       sbi->alloc_file->i_size * 8,
		       sbi->total_blocks, sbi->free_blocks);
		return -ENOSPC;
	}

	mutex_lock(&hip->extents_lock);
	if (hip->alloc_blocks == hip->first_blocks)
		goal = hfsplus_ext_lastblock(hip->first_extents);
	else {
		res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(hip->cached_extents);
	}

	len = hip->clump_blocks;
	start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
	if (start >= sbi->total_blocks) {
		start = hfsplus_block_allocate(sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	if (zeroout) {
		res = sb_issue_zeroout(sb, start, len, GFP_NOFS);
		if (res)
			goto out;
	}

	hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);

	if (hip->alloc_blocks <= hip->first_blocks) {
		if (!hip->first_blocks) {
			hfs_dbg(EXTENT, "first extents\n");
			/* no extents yet */
			hip->first_extents[0].start_block = cpu_to_be32(start);
			hip->first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(hip->first_extents,
						 hip->alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks += len;
		}
	} else {
		res = hfsplus_add_extent(hip->cached_extents,
					 hip->alloc_blocks - hip->cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(hip->cached_extents);
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			hip->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	if (!res) {
		hip->alloc_blocks += len;
		mutex_unlock(&hip->extents_lock);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
		return 0;
	}
	mutex_unlock(&hip->extents_lock);
	return res;

insert_extent:
	hfs_dbg(EXTENT, "insert new extent\n");
	res = hfsplus_ext_write_extent_locked(inode);
	if (res)
		goto out;

	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_extents[0].start_block = cpu_to_be32(start);
	hip->cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(hip->cached_extents);
	hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW;
	hip->cached_start = hip->alloc_blocks;
	hip->cached_blocks = len;

	res = 0;
	goto out;
}

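/*
 * Bring the fork in line with inode->i_size: growing is delegated to
 * the page cache, while shrinking frees whole allocation blocks past
 * the new end and trims extent records from the tail of the fork.
 */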
void hfsplus_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	u32 alloc_cnt, blk_cnt, start;
	int res;

	hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n",
		inode->i_ino, (long long)hip->phys_size, inode->i_size);

	if (inode->i_size > hip->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		loff_t size = inode->i_size;

		res = pagecache_write_begin(NULL, mapping, size, 0, 0,
					    &page, &fsdata);
		if (res)
			return;
		res = pagecache_write_end(NULL, mapping, size,
			0, 0, page, fsdata);
		if (res < 0)
			return;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == hip->phys_size)
		return;

	blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
			HFSPLUS_SB(sb)->alloc_blksz_shift;

	mutex_lock(&hip->extents_lock);

	alloc_cnt = hip->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out_unlock;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		/* XXX: We lack error handling of hfsplus_file_truncate() */
		return;
	}
	while (1) {
		if (alloc_cnt == hip->first_blocks) {
			mutex_unlock(&fd.tree->tree_lock);
			hfsplus_free_extents(sb, hip->first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks = blk_cnt;
			mutex_lock(&fd.tree->tree_lock);
			break;
		}
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = hip->cached_start;
		if (blk_cnt <= start)
			hfs_brec_remove(&fd);

		mutex_unlock(&fd.tree->tree_lock);
		hfsplus_free_extents(sb, hip->cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(hip->cached_extents);
		mutex_lock(&fd.tree->tree_lock);

		if (blk_cnt > start) {
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	hfs_find_exit(&fd);

	hip->alloc_blocks = blk_cnt;
out_unlock:
	mutex_unlock(&hip->extents_lock);
	hip->phys_size = inode->i_size;
	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
			sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
}