xfs_bmap_util.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block. We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	if (XFS_IS_REALTIME_INODE(ip))
		return XFS_FSB_TO_BB(ip->i_mount, fsb);
	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}
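
/*
 * Editorial note with a usage sketch (not part of the original file):
 * realtime files are allocated from a separate, linearly addressed
 * device, so converting to 512-byte basic blocks is a plain shift;
 * data-device block numbers additionally encode an allocation group
 * that XFS_FSB_TO_DADDR() decodes into a linear disk address. E.g.:
 *
 *	xfs_daddr_t	daddr = xfs_fsb_to_db(ip, irec.br_startblock);
 *
 * where irec is a hypothetical mapping returned by xfs_bmapi_read().
 */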

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(target->bt_bdev,
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}
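
/*
 * Worked example for the shift above (editorial, assuming 4096-byte
 * filesystem blocks): s_blocksize_bits is 12, so the shift is
 * 12 - 9 = 3 and each filesystem block spans 2^3 = 8 sectors of 512
 * bytes. Zeroing 4 blocks starting at linear block 100 thus issues a
 * zeroout over sectors [800, 832) of the backing device.
 */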

#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	mod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	div_u64_rem(ap->offset, align, &mod);
	if (mod || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t rtx; /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
	} else {
		ap->length = 0;
	}
	return 0;
}
#endif /* CONFIG_XFS_RT */
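
/*
 * Worked example for the realtime sizing above (editorial): with
 * sb_rextsize = 4 blocks and an extent size hint of 8 blocks, prod is
 * 8 / 4 = 2 rtextents, and a 16-block aligned request gives
 * ralen = 16 / 4 = 4 rtextents. Had the offset or length not been
 * multiples of the hint, prod would have been reset to 1 so the
 * allocator is not asked for hint-sized multiples it cannot honour.
 */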

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records. Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0;

	for_each_xfs_iext(ifp, &icur, &got) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}

	return numrecs;
}

/*
 * Count fsblocks of the given fork. Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur;
	xfs_extlen_t		btblocks = 0;
	int			error;

	*nextents = 0;
	*count = 0;

	if (!ifp)
		return 0;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_BTREE:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, whichfork);
			if (error)
				return error;
		}

		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_btree_count_blocks(cur, &btblocks);
		xfs_btree_del_cursor(cur, error);
		if (error)
			return error;

		/*
		 * xfs_btree_count_blocks includes the root block contained in
		 * the inode fork in @btblocks, so subtract one because we're
		 * only interested in allocated disk blocks.
		 */
		*count += btblocks - 1;

		/* fall through */
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		break;
	}

	return 0;
}
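
/*
 * Usage sketch (editorial, hypothetical caller): count the allocated
 * blocks and extent records of the data fork outside a transaction.
 *
 *	xfs_extnum_t	nextents;
 *	xfs_filblks_t	count;
 *	int		error;
 *
 *	error = xfs_bmap_count_blocks(NULL, ip, XFS_DATA_FORK,
 *			&nextents, &count);
 *
 * For a btree-format fork, the fall through above means @count covers
 * both the bmbt blocks (minus the root held in the inode literal area)
 * and the blocks mapped by the leaf records; delalloc extents are
 * skipped by xfs_bmap_count_leaves().
 */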

static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Delalloc extents that start beyond EOF can occur due to
		 * speculative EOF allocation when the delalloc extent is larger
		 * than the largest freespace extent at conversion time. These
		 * extents cannot be converted by data writeback, so can exist
		 * here even if we are not supposed to be finding delalloc
		 * extents.
		 */
		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}

static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}

static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}
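
/*
 * Editorial note: bmv_count is the caller-supplied size of the output
 * array and, per the getbmap ABI, includes the slot consumed by the
 * header structure, hence the "bmv_count - 1" above; a remaining
 * bmv_length of zero means the requested range has been fully consumed.
 */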

static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}
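
/*
 * Editorial note: this helper is what lets the reporting loop in
 * xfs_getbmap() emit one getbmapx record per shared/unshared piece of a
 * single bmbt record. After xfs_reflink_trim_around_shared() shortens
 * *rec, it advances the start offset (and, for real extents, the start
 * block) past the piece just reported and returns true while any part
 * of the original record up to @total_end remains.
 */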

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	struct xfs_iext_cursor	icur;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		if (!XFS_IFORK_Q(ip))
			goto out_unlock_iolock;

		max_len = 1LL << 32;
		lock = xfs_ilock_attr_map_shared(ip);
		break;
	case XFS_COW_FORK:
		/* No CoW fork? Just return */
		if (!ifp)
			goto out_unlock_iolock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation. These are not removed
			 * until the release function is called or the inode
			 * is inactivated. Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_d.di_flags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			goto out_unlock_ilock;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}
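
/*
 * Usage sketch (editorial, condensed and hypothetical; the real caller
 * is the GETBMAP ioctl path): the caller provides bmv_count - 1 output
 * records, and on return bmv_entries says how many were filled.
 *
 *	struct kgetbmap	*buf;
 *	int		error;
 *
 *	buf = kvcalloc(bmv.bmv_count - 1, sizeof(*buf), GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	error = xfs_getbmap(ip, &bmv, buf);
 *	...copy bmv and bmv.bmv_entries records of buf to userspace...
 *	kvfree(buf);
 */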

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode. This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	struct xfs_ifork	*ifp = &ip->i_df;
	xfs_fileoff_t		end_fsb = start_fsb + length;
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		goto out_unlock;

	while (got.br_startoff + got.br_blockcount > start_fsb) {
		del = got;
		xfs_trim_extent(&del, start_fsb, length);

		/*
		 * A delete can push the cursor forward. Step back to the
		 * previous extent on non-delalloc or extents outside the
		 * target range.
		 */
		if (!del.br_blockcount ||
		    !isnullstartblock(del.br_startblock)) {
			if (!xfs_iext_prev_extent(ifp, &icur, &got))
				break;
			continue;
		}

		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
						  &got, &del);
		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_filblks_t		map_len;
	int			nimaps;
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file. If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size. If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
					XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
	allocatesize_fsb = endoffset_fsb - startoffset_fsb;

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			div_u64_rem(startoffset_fsb, extsz, &temp);
			if (temp)
				e += temp;
			div_u64_rem(e, extsz, &temp);
			if (temp)
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, 0, imapp,
					&nimaps);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* unlock inode, unreserve quota blocks, cancel trans */
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}
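
/*
 * Worked example for the rounding above (editorial, assuming 1024-byte
 * blocks and 4096-byte pages): rounding = max(1024, 4096) = 4096, so a
 * range of bytes [5000, 7000) is widened to [4096, 8191]; the whole
 * pages overlapping the range are written back and dropped from the
 * page cache before the underlying extents are unmapped or shifted.
 */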

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* We can only free complete realtime extents. */
	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
		startoffset_fsb = roundup_64(startoffset_fsb,
					     mp->m_sb.sb_rextsize);
		endoffset_fsb = rounddown_64(endoffset_fsb,
					     mp->m_sb.sb_rextsize);
	}

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 */
	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end. iomap_zero_range is smart
	 * enough to skip any holes, including those we just created, but we
	 * must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;
	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
			&xfs_buffered_write_iomap_ops);
	if (error)
		return error;

	/*
	 * If we zeroed right up to EOF and EOF straddles a page boundary we
	 * must make sure that the post-EOF area is also zeroed because the
	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
	 * Writeback of the eof page will do this, albeit clumsily.
	 */
	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
	}

	return error;
}
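
/*
 * Worked example for the realtime trimming above (editorial): with
 * sb_rextsize = 4 blocks, a free request covering fsbs [5, 14) is
 * trimmed to the whole rt extents inside it: startoffset_fsb rounds up
 * to 8 and endoffset_fsb rounds down to 12. The untrimmed parts that
 * stay allocated are then cleared by iomap_zero_range() instead of
 * being freed.
 */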

static int
xfs_prepare_shift(
	struct xfs_inode	*ip,
	loff_t			offset)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Shift operations must stabilize the start block offset boundary along
	 * with the full range of the operation. If we don't, a COW writeback
	 * completion could race with an insert, front merge with the start
	 * extent (after split) during the shift and corrupt the file. Start
	 * with the block just prior to the start to stabilize the boundary.
	 */
	offset = round_down(offset, 1 << mp->m_sb.sb_blocklog);
	if (offset)
		offset -= (1 << mp->m_sb.sb_blocklog);

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
	if (error)
		return error;

	/*
	 * Clean out anything hanging around in the cow fork now that
	 * we've flushed all the dirty data out to disk to avoid having
	 * CoW extents at the wrong offsets.
	 */
	if (xfs_inode_has_cow_data(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
				true);
		if (error)
			return error;
	}

	return 0;
}
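
/*
 * Worked example for the boundary math above (editorial, assuming
 * 4096-byte blocks): a shift at byte offset 40960 rounds down to 40960
 * (already a block boundary) and then steps back one block to 36864,
 * so the flush and invalidation also stabilize the extent just before
 * the shift boundary, closing the COW-writeback merge race described
 * in the comment above.
 */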

/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is free the data blocks in the specified range
 *	by calling xfs_free_file_space(). This also syncs dirty data and
 *	invalidates the page cache over the region being collapsed. We then
 *	shift the extent records to the left to cover the resulting hole.
 *
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	while (!done) {
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done);
		if (error)
			goto out_trans_cancel;
		if (done)
			break;

		/* finish any deferred frees and roll the transaction */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_trans_cancel;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given file.
 *	The first thing we do is sync dirty data and invalidate the page cache
 *	over the region on which the insert range is working. We then split an
 *	extent in two at the given offset by calling xfs_bmap_split_extent, and
 *	shift all the extent records lying in [offset, last allocated extent]
 *	to the right to make room for the hole.
 *
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_insert_file_space(ip);

	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * The extent shifting code works on extent granularity. So, if stop_fsb
	 * is not the starting block of an extent, we need to split the extent
	 * at stop_fsb.
	 */
	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
	if (error)
		goto out_trans_cancel;

	do {
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_trans_cancel;

		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb);
		if (error)
			goto out_trans_cancel;
	} while (!done);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6. If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip)	/* tmp inode */
{
	struct xfs_ifork	*ifp = &ip->i_df;
	struct xfs_ifork	*tifp = &tip->i_df;

	/* User/group/project quota ids must match if quotas are enforced. */
	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
	     ip->i_d.di_projid != tip->i_d.di_projid))
		return -EINVAL;

	/* Should never get a local format */
	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode then
	 * why did userspace call us?
	 */
	if (ifp->if_nextents < tifp->if_nextents)
		return -EINVAL;

	/*
	 * If we have to use the (expensive) rmap swap method, we can
	 * handle any number of extents and any format.
	 */
	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
		return 0;

	/*
	 * If the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    tifp->if_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(ip) &&
		    XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}

static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}

/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip)
{
	struct xfs_trans	*tp = *tpp;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	uirec;
	struct xfs_bmbt_irec	tirec;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	xfs_filblks_t		count_fsb;
	int			error;
	xfs_filblks_t		ilen;
	xfs_filblks_t		rlen;
	int			nimaps;
	uint64_t		tip_flags2;

	/*
	 * If the source file has shared blocks, we must flag the donor
	 * file as having shared blocks so that we get the shared-block
	 * rmap functions when we go to fix up the rmaps. The flags
	 * will be switched for real later.
	 */
	tip_flags2 = tip->i_d.di_flags2;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;

	offset_fsb = 0;
	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

	while (count_fsb) {
		/* Read extent from the donor file */
		nimaps = 1;
		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
				&nimaps, 0);
		if (error)
			goto out;
		ASSERT(nimaps == 1);
		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
		ilen = tirec.br_blockcount;

		/* Unmap the old blocks in the source file. */
		while (tirec.br_blockcount) {
			ASSERT(tp->t_firstblock == NULLFSBLOCK);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

			/* Read extent from the source file */
			nimaps = 1;
			error = xfs_bmapi_read(ip, tirec.br_startoff,
					tirec.br_blockcount, &irec,
					&nimaps, 0);
			if (error)
				goto out;
			ASSERT(nimaps == 1);
			ASSERT(tirec.br_startoff == irec.br_startoff);
			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

			/* Trim the extent. */
			uirec = tirec;
			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
					tirec.br_blockcount,
					irec.br_blockcount);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

			/* Remove the mapping from the donor file. */
			xfs_bmap_unmap_extent(tp, tip, &uirec);

			/* Remove the mapping from the source file. */
			xfs_bmap_unmap_extent(tp, ip, &irec);

			/* Map the donor file's blocks into the source file. */
			xfs_bmap_map_extent(tp, ip, &uirec);

			/* Map the source file's blocks into the donor file. */
			xfs_bmap_map_extent(tp, tip, &irec);

			error = xfs_defer_finish(tpp);
			tp = *tpp;
			if (error)
				goto out;

			tirec.br_startoff += rlen;
			if (tirec.br_startblock != HOLESTARTBLOCK &&
			    tirec.br_startblock != DELAYSTARTBLOCK)
				tirec.br_startblock += rlen;
			tirec.br_blockcount -= rlen;
		}

		/* Roll on... */
		count_fsb -= ilen;
		offset_fsb += ilen;
	}

	tip->i_d.di_flags2 = tip_flags2;
	return 0;

out:
	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
	tip->i_d.di_flags2 = tip_flags2;
	return error;
}

/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip,
	int			*src_log_flags,
	int			*target_log_flags)
{
	xfs_filblks_t		aforkblks = 0;
	xfs_filblks_t		taforkblks = 0;
	xfs_extnum_t		junk;
	uint64_t		tmp;
	int			error;

	/*
	 * Count the number of extended attribute blocks
	 */
	if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
	    ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
				&aforkblks);
		if (error)
			return error;
	}
	if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
	    tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
				&taforkblks);
		if (error)
			return error;
	}

	/*
	 * Btree format (v3) inodes have the inode number stamped in the bmbt
	 * block headers. We can't start changing the bmbt blocks until the
	 * inode owner change is logged so recovery does the right thing in the
	 * event of a crash. Set the owner change log flags now and leave the
	 * bmbt scan as the last step.
	 */
	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
			(*target_log_flags) |= XFS_ILOG_DOWNER;
		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
			(*src_log_flags) |= XFS_ILOG_DOWNER;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	swap(ip->i_df, tip->i_df);

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*src_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
		       (*src_log_flags & XFS_ILOG_DOWNER));
		(*src_log_flags) |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*target_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		(*target_log_flags) |= XFS_ILOG_DBROOT;
		ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
		       (*target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	return 0;
}

/*
 * Fix up the owners of the bmbt blocks to refer to the current inode. The
 * change owner scan attempts to order all modified buffers in the current
 * transaction. In the event of ordered buffer failure, the offending buffer is
 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
 * the transaction in this case to replenish the fallback log reservation and
 * restart the scan. This process repeats until the scan completes.
 */
static int
xfs_swap_change_owner(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tmpip)
{
	int			error;
	struct xfs_trans	*tp = *tpp;

	do {
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
					      NULL);
		/* success or fatal error */
		if (error != -EAGAIN)
			break;

		error = xfs_trans_roll(tpp);
		if (error)
			break;
		tp = *tpp;

		/*
		 * Redirty both inodes so they can relog and keep the log tail
		 * moving forward.
		 */
		xfs_trans_ijoin(tp, ip, 0);
		xfs_trans_ijoin(tp, tmpip, 0);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
	} while (true);

	return error;
}

int
xfs_swap_extents(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip,	/* tmp inode */
	struct xfs_swapext	*sxp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_bstat	*sbp = &sxp->sx_stat;
	int			src_log_flags, target_log_flags;
	int			error = 0;
	int			lock_flags;
	uint64_t		f;
	int			resblks = 0;
	unsigned int		flags = 0;

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with. Then we can ensure the inodes are flushed and have no
	 * page cache safely. Once we have done this we can take the ilocks and
	 * do the rest of the checks.
	 */
	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	lock_flags = XFS_MMAPLOCK_EXCL;
	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		goto out_unlock;

	error = xfs_qm_dqattach(tip);
	if (error)
		goto out_unlock;

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	if (xfs_inode_has_cow_data(tip)) {
		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
		if (error)
			goto out_unlock;
	}

	/*
	 * Extent "swapping" with rmap requires a permanent reservation and
	 * a block reservation because it's really just a remap operation
	 * performed with log redo items!
	 */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		int		w = XFS_DATA_FORK;
		uint32_t	ipnext = ip->i_df.if_nextents;
		uint32_t	tipnext = tip->i_df.if_nextents;

		/*
		 * Conceptually this shouldn't affect the shape of either bmbt,
		 * but since we atomically move extents one by one, we reserve
		 * enough space to rebuild both trees.
		 */
		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);

		/*
		 * If either inode straddles a bmapbt block allocation boundary,
		 * the rmapbt algorithm triggers repeated allocs and frees as
		 * extents are remapped. This can exhaust the block reservation
		 * prematurely and cause shutdown. Return freed blocks to the
		 * transaction reservation to counter this behavior.
		 */
		flags |= XFS_TRANS_RES_FDBLKS;
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
				&tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction
	 * commit or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
	lock_flags |= XFS_ILOCK_EXCL;
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_ijoin(tp, tip, 0);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in. If they differ, we abort this swap.
	 * This is the mechanism used to ensure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;

	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		error = xfs_swap_extent_rmap(&tp, ip, tip);
	else
		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
				&target_log_flags);
	if (error)
		goto out_trans_cancel;

	/* Do we have to swap reflink flags? */
	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
	}

	/* Swap the cow forks. */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		ASSERT(!ip->i_cowfp ||
		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
		ASSERT(!tip->i_cowfp ||
		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);

		swap(ip->i_cowfp, tip->i_cowfp);

		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(ip);
		else
			xfs_inode_clear_cowblocks_tag(ip);
		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(tip);
		else
			xfs_inode_clear_cowblocks_tag(tip);
	}

	xfs_trans_log_inode(tp, ip, src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
	 * have inode number owner values in the bmbt blocks that still refer to
	 * the old inode. Scan each bmbt to fix up the owner values with the
	 * inode number of the current inode.
	 */
	if (src_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, ip, tip);
		if (error)
			goto out_trans_cancel;
	}
	if (target_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, tip, ip);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);

out_unlock:
	xfs_iunlock(ip, lock_flags);
	xfs_iunlock(tip, lock_flags);
	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}