inode.c

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

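/*
 * ufs_block_to_path() - parse a logical block number into a chain of
 * offsets into the inode's block tree: offsets[0] indexes the inode's
 * direct/indirect pointer array, the remaining entries (if any) index
 * the successive indirect blocks.  Returns the depth of the chain
 * (1..4), or 0 if the block lies beyond the maximal file size.
 */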
static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}

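/*
 * An Indirect remembers one step of a walk through the indirect block
 * tree: the address of the pointer that was followed (p), the value it
 * held at the time (key32/key64) and the buffer it lives in (bh).
 * grow_chain32()/grow_chain64() append one step to the chain and, under
 * the meta_lock seqlock, re-check that every pointer already in the
 * chain still holds the value that was sampled; a zero return tells the
 * caller that a concurrent truncate invalidated the chain and the walk
 * must be restarted.
 */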
typedef struct {
	void *p;
	union {
		__fs32 key32;
		__fs64 key64;
	};
	struct buffer_head *bh;
} Indirect;

static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;

	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;

	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */
static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
	int shift = uspi->s_apbshift-uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
		uspi->s_fpbshift, uspi->s_apbmask,
		(unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n>>shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n>>shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}

/*
 * Unpacking tails: we have a file with partial final block and
 * we had been asked to extend it.  If the fragment being written
 * is within the same block, we need to extend the tail just to cover
 * that fragment.  Otherwise the tail is extended to full block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * ones.
 */
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
		  int *err, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
	unsigned block = ufs_fragstoblks(lastfrag);
	unsigned new_size;
	void *p;
	u64 tmp;

	if (writes_to < (lastfrag | uspi->s_fpbmask))
		new_size = (writes_to & uspi->s_fpbmask) + 1;
	else
		new_size = uspi->s_fpb;

	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
				new_size - (lastfrag & uspi->s_fpbmask), err,
				locked_page);
	return tmp != 0;
}

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: number of block pointer within the inode's array.
 * @new_fragment: number of the newly allocated fragment(s)
 * @err: set if something goes wrong
 * @new: set if a new block is allocated
 * @locked_page: for ufs_new_fragments()
 */
static u64
ufs_inode_getfrag(struct inode *inode, unsigned index,
		  sector_t new_fragment, int *err,
		  int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 tmp, goal, lastfrag;
	unsigned nfrags = uspi->s_fpb;
	void *p;

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;
	 */

	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	lastfrag = ufsi->i_lastfrag;

	/* will that be a new tail? */
	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
		nfrags = (new_fragment & uspi->s_fpbmask) + 1;

	goal = 0;
	if (index) {
		goal = ufs_data_ptr_to_cpu(sb,
				 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
		if (goal)
			goal += uspi->s_fpb;
	}
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
				goal, nfrags, err, locked_page);

	if (!tmp) {
		*err = -ENOSPC;
		return 0;
	}

	if (new)
		*new = 1;
	inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
out:
	return tmp + uspi->s_sbbase;

	/* This part : To be implemented ....
	Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

	*/
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of pointer within the indirect block
 * @new_fragment: number of new allocated fragment
 *  (block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static u64
ufs_inode_getblock(struct inode *inode, u64 ind_block,
		  unsigned index, sector_t new_fragment, int *err,
		  int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	u64 tmp = 0, goal;
	struct buffer_head *bh;
	void *p;

	if (!ind_block)
		return 0;

	bh = sb_bread(sb, ind_block + (index >> shift));
	if (unlikely(!bh)) {
		*err = -EIO;
		return 0;
	}

	index &= uspi->s_apbmask >> uspi->s_fpbshift;
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + index;
	else
		p = (__fs32 *)bh->b_data + index;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	if (index && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
out:
	brelse (bh);
	UFSD("EXIT\n");
	if (tmp)
		tmp += uspi->s_sbbase;
	return tmp;
}

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage and so on
 */
static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int err = 0, new = 0;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	u64 phys64 = 0;
	unsigned frag = fragment & uspi->s_fpbmask;

	phys64 = ufs_frag_map(inode, offsets, depth);
	if (!create)
		goto done;

	if (phys64) {
		if (fragment >= UFS_NDIR_FRAGMENT)
			goto done;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		if (fragment < UFS_I(inode)->i_lastfrag) {
			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
			goto done;
		}
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
	}
	/* This code entered only while writing ....? */

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (unlikely(!depth)) {
		ufs_warning(sb, "ufs_get_block", "block > big");
		err = -EIO;
		goto out;
	}

	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
		if (tailfrags && fragment >= lastfrag) {
			if (!ufs_extend_tail(inode, fragment,
					     &err, bh_result->b_page))
				goto out;
		}
	}

	if (depth == 1) {
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, &new, bh_result->b_page);
	} else {
		int i;
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, NULL, NULL);
		for (i = 1; i < depth - 1; i++)
			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
						fragment, &err, NULL, NULL);
		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
					fragment, &err, &new, bh_result->b_page);
	}
out:
	if (phys64) {
		phys64 += frag;
		map_bh(bh_result, sb, phys64);
		if (new)
			set_buffer_new(bh_result);
	}
	mutex_unlock(&UFS_I(inode)->truncate_mutex);
	return err;

done:
	if (phys64)
		map_bh(bh_result, sb, phys64 + frag);
	return 0;
}

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page,ufs_getfrag_block,wbc);
}

static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
			inode->i_op = &simple_symlink_inode_operations;
		} else {
			inode->i_mapping->a_ops = &ufs_aops;
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;
	struct inode *inode;
	int err = -EIO;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}
	brelse(bh);
	if (err)
		goto bad_inode;

	inode_inc_iversion(inode);
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(err);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}

static int ufs_update_inode(struct inode * inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}

void ufs_evict_inode(struct inode * inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks &&
		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		     S_ISLNK(inode->i_mode)))
			ufs_truncate_blocks(inode);
		ufs_update_inode(inode, inode_needs_sync(inode));
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}

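/*
 * struct to_free accumulates physically contiguous runs of fragments so
 * that they can be handed to ufs_free_blocks() in a single call.
 * free_data() either extends the current run (when the new range starts
 * exactly where the previous one ended) or flushes the accumulated run
 * and starts a new one; calling it with count == 0 flushes whatever is
 * left.
 */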
struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}

#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

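/*
 * ufs_trunc_direct() frees everything past the new end of file that is
 * reachable through the inode's direct pointers.  The region is split
 * into three parts: the fragments that share a block with the new last
 * fragment (frag1..frag2), the whole blocks in between (block1..block2)
 * and the partial block at the old end of file (frag3..frag4).
 */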
static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i, tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free first free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp )
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1 ; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free last free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp )
		ufs_panic(sb, "ufs_truncate_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments (inode, tmp, frag4);
next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}

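/*
 * free_full_branch() releases an indirect branch that is being removed
 * in its entirety: it recurses through lower-level indirect blocks (or
 * frees the data blocks at depth 1), forgets the indirect block's buffer
 * and finally frees the indirect block itself.  The pointer to ind_block
 * has already been cleared by the caller, so no locking is needed here.
 */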
static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

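/*
 * free_branch_tail() handles an indirect block that survives the
 * truncation but loses its tail: every pointer from index 'from'
 * onwards is cleared under meta_lock, the branch it referred to is
 * freed (recursively via free_full_branch() for deeper levels), and the
 * partially emptied indirect block is written back.
 */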
static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb ; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}

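/*
 * ufs_alloc_lastblock() makes sure the block that will contain the new
 * end of file is fully allocated before the actual truncation: the last
 * fragment is mapped (allocating it if necessary) and, for indirectly
 * addressed files, the remaining fragments of that block are zeroed on
 * disk.
 */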
static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct page *lastpage;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		clean_bdev_bh_alias(bh);
		/*
		 * we do not zero the fragment, because if it is mapped
		 * to a hole, it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}

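/*
 * ufs_truncate_blocks() releases everything beyond the current i_size.
 * The path to the new last block tells us which branches of the block
 * tree must only be partially emptied (handled by ufs_trunc_direct()
 * and free_branch_tail()) and which top-level branches can be removed
 * outright (handled by free_full_branch()).  Everything runs under
 * truncate_mutex so it cannot race with block allocation.
 */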
static void ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth;
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (inode->i_size) {
		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
		depth = ufs_block_to_path(inode, last, offsets);
		if (!depth)
			return;
	} else {
		depth = 1;
	}

	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2] != uspi->s_apb - 1)
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
		for (i = 0; i < depth2; i++) {
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	read_seqlock_excl(&ufsi->meta_lock);
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	read_sequnlock_excl(&ufsi->meta_lock);
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}

static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	ufs_truncate_blocks(inode);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};