xfs_iomap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#define XFS_ALLOC_ALIGN(mp, off) \
	(((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)

static int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}
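
/*
 * Translate an extent mapping (struct xfs_bmbt_irec) into the generic
 * struct iomap handed back to the iomap infrastructure, mapping holes,
 * delalloc and unwritten extents to the corresponding iomap types.
 */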
int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	u16			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		return xfs_alert_fsblock_zero(ip, imap);

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
	iomap->flags = flags;

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;
	return 0;
}
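
/*
 * Report the given file range as an unmapped hole in the iomap, without
 * consulting the extent tree.
 */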
static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
}
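
/*
 * Convert the end of a byte range into its file system block, clamped so
 * that the mapping never extends past the maximum supported file offset
 * (s_maxbytes).
 */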
static inline xfs_fileoff_t
xfs_iomap_end_fsb(
	struct xfs_mount	*mp,
	loff_t			offset,
	loff_t			count)
{
	ASSERT(offset <= mp->m_super->s_maxbytes);
	return min(XFS_B_TO_FSB(mp, offset + count),
		   XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
}

static xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	return align;
}

/*
 * Check if last_fsb is outside the last extent, and if so grow it to the next
 * stripe unit boundary.
 */
xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_extlen_t		extsz = xfs_get_extsz_hint(ip);
	xfs_extlen_t		align = xfs_eof_alignment(ip);
	struct xfs_bmbt_irec	irec;
	struct xfs_iext_cursor	icur;

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);

	/*
	 * Always round up the allocation request to the extent hint boundary.
	 */
	if (extsz) {
		if (align)
			align = roundup_64(align, extsz);
		else
			align = extsz;
	}

	if (align) {
		xfs_fileoff_t	aligned_end_fsb = roundup_64(end_fsb, align);

		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
		    aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
			return aligned_end_fsb;
	}

	return end_fsb;
}
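
/*
 * Allocate real blocks backing the given range for a direct write.  On
 * success *imap is overwritten with the resulting mapping; for DAX the
 * blocks are zeroed and converted to written in the same transaction.
 */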
int
xfs_iomap_write_direct(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		count_fsb,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_filblks_t		resaligned;
	int			nimaps;
	int			quota_flag;
	uint			qblocks, resblks;
	unsigned int		resrtextents = 0;
	int			error;
	int			bmapi_flags = XFS_BMAPI_PREALLOC;
	uint			tflags = 0;

	ASSERT(count_fsb > 0);

	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
					   xfs_get_extsz_hint(ip));
	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
				imap, &nimaps);
	if (error)
		goto out_res_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_res_cancel:
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}
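
/*
 * Decide whether speculative preallocation needs to be throttled against
 * the given quota type: only if the quota is enforced, has a high watermark
 * set, and the new reservation would put us over the low watermark.
 */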
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		alloc_blocks)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_blk.reserved + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}
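
/*
 * Work out how hard to throttle preallocation against one quota: shrink the
 * passed-in block count and shift so they reflect the most restrictive of
 * the current values and this quota's remaining headroom.
 */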
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		*qblocks,
	int			*qshift,
	int64_t			*qfreesp)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	int64_t			freesp;
	int			shift = 0;

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_blk.reserved >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_blk.reserved;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_iext_cursor	ncur = *icur;
	struct xfs_bmbt_irec	prev, got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	xfs_fsblock_t		alloc_blocks = 0;
	xfs_extlen_t		plen;
	int			shift = 0;
	int			qshift = 0;

	/*
	 * As an exception we don't do any preallocation at all if the file is
	 * smaller than the minimum preallocation and we are using the default
	 * dynamic preallocation scheme, as it is likely this is the only write
	 * to the file that is going to be done.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
		return 0;

	/*
	 * Use the minimum preallocation size for small files or if we are
	 * writing right after a hole.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_allocsize_blocks;

	/*
	 * Take the size of the preceding data extents as the basis for the
	 * preallocation size. Note that we don't care if the previous extents
	 * are written or not.
	 */
	plen = prev.br_blockcount;
	while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
		if (plen > MAXEXTLEN / 2 ||
		    isnullstartblock(got.br_startblock) ||
		    got.br_startoff + got.br_blockcount != prev.br_startoff ||
		    got.br_startblock + got.br_blockcount != prev.br_startblock)
			break;
		plen += got.br_blockcount;
		prev = got;
	}

	/*
	 * If the size of the extents is greater than half the maximum extent
	 * length, then use the current offset as the basis.  This ensures that
	 * for large files the preallocation size always extends to MAXEXTLEN
	 * rather than falling short due to things like stripe unit/width
	 * alignment of real extents.
	 */
	alloc_blocks = plen * 2;
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported
	 * prealloc size, we round up first, apply appropriate throttling,
	 * round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
	if (alloc_blocks < mp->m_allocsize_blocks)
		alloc_blocks = mp->m_allocsize_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_allocsize_blocks);
	return alloc_blocks;
}
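
/*
 * Convert unwritten extents in the given range to written, logging the new
 * on-disk size as we go.  Typically called once data has safely reached the
 * disk, e.g. from direct I/O or DAX write completion.
 */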
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	/* Attach dquots so that bmbt splits are accounted correctly. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
				XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
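
/*
 * Decide if a direct write requires a real block allocation: either nothing
 * is mapped, the mapping is a hole or delalloc, or (for DAX) the extent is
 * still unwritten.
 */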
static inline bool
imap_needs_alloc(
	struct inode		*inode,
	unsigned		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	/* don't allocate blocks when just zeroing */
	if (flags & IOMAP_ZERO)
		return false;
	if (!nimaps ||
	    imap->br_startblock == HOLESTARTBLOCK ||
	    imap->br_startblock == DELAYSTARTBLOCK)
		return true;
	/* we convert unwritten extents before copying the data for DAX */
	if (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN)
		return true;
	return false;
}
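
/*
 * Decide if a write to a copy-on-write capable inode needs to go through
 * the COW fork rather than writing in place.
 */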
static inline bool
imap_needs_cow(
	struct xfs_inode	*ip,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	if (!xfs_is_cow_inode(ip))
		return false;

	/* when zeroing we don't have to COW holes or unwritten extents */
	if (flags & IOMAP_ZERO) {
		if (!nimaps ||
		    imap->br_startblock == HOLESTARTBLOCK ||
		    imap->br_state == XFS_EXT_UNWRITTEN)
			return false;
	}

	return true;
}
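
/*
 * Take the ILOCK in the mode required for the requested iomap operation,
 * honouring IOMAP_NOWAIT by failing with -EAGAIN instead of blocking.
 */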
static int
xfs_ilock_for_iomap(
	struct xfs_inode	*ip,
	unsigned		flags,
	unsigned		*lockmode)
{
	unsigned		mode = XFS_ILOCK_SHARED;
	bool			is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);

	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_cow_inode(ip) && is_write)
		mode = XFS_ILOCK_EXCL;

	/*
	 * Extents not yet cached require exclusive access, don't block.  This
	 * is an opencoded xfs_ilock_data_map_shared() call but with
	 * non-blocking behaviour.
	 */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = XFS_ILOCK_EXCL;
	}

relock:
	if (flags & IOMAP_NOWAIT) {
		if (!xfs_ilock_nowait(ip, mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, mode);
	}

	/*
	 * The reflink iflag could have changed since the earlier unlocked
	 * check, so if we got ILOCK_SHARED for a write but we're now a
	 * reflink inode we have to switch to ILOCK_EXCL and relock.
	 */
	if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
		xfs_iunlock(ip, mode);
		mode = XFS_ILOCK_EXCL;
		goto relock;
	}

	*lockmode = mode;
	return 0;
}

/*
 * Check that the imap we are going to return to the caller spans the entire
 * range that the caller requested for the IO.
 */
static bool
imap_spans_range(
	struct xfs_bmbt_irec	*imap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	if (imap->br_startoff > offset_fsb)
		return false;
	if (imap->br_startoff + imap->br_blockcount < end_fsb)
		return false;
	return true;
}
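
/*
 * iomap_begin handler for direct writes (and zeroing): look up the existing
 * mapping and either return it, allocate COW blocks for shared extents, or
 * allocate new real blocks for holes and delalloc ranges.
 */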
static int
xfs_direct_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap, cmap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	u16			iomap_flags = 0;
	unsigned		lockmode;

	ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Writes that span EOF might trigger an IO size update on completion,
	 * so consider them to be dirty for the purposes of O_DSYNC even if
	 * there are no other metadata changes pending or made here.
	 */
	if (offset + length > i_size_read(inode))
		iomap_flags |= IOMAP_F_DIRTY;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (imap_needs_cow(ip, flags, &imap, nimaps)) {
		error = -EAGAIN;
		if (flags & IOMAP_NOWAIT)
			goto out_unlock;

		/* may drop and re-acquire the ilock */
		error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
				&lockmode, flags & IOMAP_DIRECT);
		if (error)
			goto out_unlock;
		if (shared)
			goto out_found_cow;
		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if (imap_needs_alloc(inode, flags, &imap, nimaps))
		goto allocate_blocks;

	/*
	 * NOWAIT IO needs to span the entire requested IO with a single map so
	 * that we avoid partial IO failures due to the rest of the IO range not
	 * covered by this map triggering an EAGAIN condition when it is
	 * subsequently mapped and aborting the IO.
	 */
	if ((flags & IOMAP_NOWAIT) &&
	    !imap_spans_range(&imap, offset_fsb, end_fsb)) {
		error = -EAGAIN;
		goto out_unlock;
	}

	xfs_iunlock(ip, lockmode);
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);

allocate_blocks:
	error = -EAGAIN;
	if (flags & IOMAP_NOWAIT)
		goto out_unlock;

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done somewhat symmetric with the work writeback does.  This
	 * is a completely arbitrary number pulled out of thin air as a best
	 * guess for initial testing.
	 *
	 * Note that the value needs to be less than 32-bits wide until the
	 * lower level functions are updated.
	 */
	length = min_t(loff_t, length, 1024 * PAGE_SIZE);
	end_fsb = xfs_iomap_end_fsb(mp, offset, length);

	if (offset + length > XFS_ISIZE(ip))
		end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
	else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
		end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
	xfs_iunlock(ip, lockmode);

	error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
			&imap);
	if (error)
		return error;

	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags | IOMAP_F_NEW);

out_found_cow:
	xfs_iunlock(ip, lockmode);
	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
	if (imap.br_startblock != HOLESTARTBLOCK) {
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
		if (error)
			return error;
	}
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_direct_write_iomap_ops = {
	.iomap_begin		= xfs_direct_write_iomap_begin,
};
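
/*
 * iomap_begin handler for buffered writes: reserve delalloc blocks (with
 * speculative preallocation past EOF) in the data or COW fork as needed.
 */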
static int
xfs_buffered_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, count);
	struct xfs_bmbt_irec	imap, cmap;
	struct xfs_iext_cursor	icur, ccur;
	xfs_fsblock_t		prealloc_blocks = 0;
	bool			eof = false, cow_eof = false, shared = false;
	int			allocfork = XFS_DATA_FORK;
	int			error = 0;

	/* we can't use delayed allocations when using extent size hints */
	if (xfs_get_extsz_hint(ip))
		return xfs_direct_write_iomap_begin(inode, offset, count,
				flags, iomap, srcmap);

	ASSERT(!XFS_IS_REALTIME_INODE(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	/*
	 * Search the data fork first to look up our source mapping.  We
	 * always need the data fork map, as we have to return it to the
	 * iomap code so that the higher level write code can read data in to
	 * perform read-modify-write cycles for unaligned writes.
	 */
	eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
	if (eof)
		imap.br_startoff = end_fsb; /* fake hole until the end */

	/* We never need to allocate blocks for zeroing a hole. */
	if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
		goto out_unlock;
	}

	/*
	 * Search the COW fork extent list even if we did not find a data fork
	 * extent.  This serves two purposes: first this implements the
	 * speculative preallocation using cowextsize, so that we also unshare
	 * blocks adjacent to shared blocks instead of just the shared blocks
	 * themselves.  Second the lookup in the extent list is generally faster
	 * than going out to the shared extent tree.
	 */
	if (xfs_is_cow_inode(ip)) {
		if (!ip->i_cowfp) {
			ASSERT(!xfs_is_reflink_inode(ip));
			xfs_ifork_init_cow(ip);
		}
		cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
				&ccur, &cmap);
		if (!cow_eof && cmap.br_startoff <= offset_fsb) {
			trace_xfs_reflink_cow_found(ip, &cmap);
			goto found_cow;
		}
	}

	if (imap.br_startoff <= offset_fsb) {
		/*
		 * For reflink files we may need a delalloc reservation when
		 * overwriting shared extents.  This includes zeroing of
		 * existing extents that contain data.
		 */
		if (!xfs_is_cow_inode(ip) ||
		    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_bmap_trim_cow(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		/* Not shared?  Just report the (potentially capped) extent. */
		if (!shared) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		/*
		 * Fork all the shared blocks from our write offset until the
		 * end of the extent.
		 */
		allocfork = XFS_COW_FORK;
		end_fsb = imap.br_startoff + imap.br_blockcount;
	} else {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done somewhat symmetric
		 * with the work writeback does.  This is a completely
		 * arbitrary number pulled out of thin air.
		 *
		 * Note that the value needs to be less than 32-bits wide until
		 * the lower level functions are updated.
		 */
		count = min_t(loff_t, count, 1024 * PAGE_SIZE);
		end_fsb = xfs_iomap_end_fsb(mp, offset, count);

		if (xfs_is_always_cow_inode(ip))
			allocfork = XFS_COW_FORK;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_unlock;

	if (eof && offset + count > XFS_ISIZE(ip)) {
		/*
		 * Determine the initial size of the preallocation.
		 * We clean up any extra preallocation when the file is closed.
		 */
		if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
			prealloc_blocks = mp->m_allocsize_blocks;
		else
			prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
						offset, count, &icur);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb,
				XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks,
			allocfork == XFS_DATA_FORK ? &imap : &cmap,
			allocfork == XFS_DATA_FORK ? &icur : &ccur,
			allocfork == XFS_DATA_FORK ? eof : cow_eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	if (allocfork == XFS_COW_FORK) {
		trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
		goto found_cow;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, IOMAP_F_NEW);

found_imap:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);

found_cow:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (imap.br_startoff <= offset_fsb) {
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
		if (error)
			return error;
	} else {
		xfs_trim_extent(&cmap, offset_fsb,
				imap.br_startoff - offset_fsb);
	}
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
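
/*
 * iomap_end handler for buffered writes: punch out delalloc blocks that
 * this write reserved (IOMAP_F_NEW) but failed to cover with written data.
 */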
static int
xfs_buffered_write_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	if (iomap->type != IOMAP_DELALLOC)
		return 0;

	/*
	 * Behave as if the write failed if drop writes is enabled. Set the NEW
	 * flag to force delalloc cleanup.
	 */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
		iomap->flags |= IOMAP_F_NEW;
		written = 0;
	}

	/*
	 * start_fsb refers to the first unused block after a short write. If
	 * nothing was written, round offset down to point at the first block in
	 * the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim delalloc blocks if they were allocated by this write and we
	 * didn't manage to write the whole range.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls. If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					       end_fsb - start_fsb);
		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

const struct iomap_ops xfs_buffered_write_iomap_ops = {
	.iomap_begin		= xfs_buffered_write_iomap_begin,
	.iomap_end		= xfs_buffered_write_iomap_end,
};
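
/*
 * iomap_begin handler for reads: a plain extent lookup, optionally trimmed
 * around shared extents when IOMAP_REPORT asks for shared/unshared state.
 */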
static int
xfs_read_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	unsigned		lockmode;

	ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (!error && (flags & IOMAP_REPORT))
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, shared ? IOMAP_F_SHARED : 0);
}

const struct iomap_ops xfs_read_iomap_ops = {
	.iomap_begin		= xfs_read_iomap_begin,
};
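
/*
 * iomap_begin handler for SEEK_HOLE/SEEK_DATA: report data extents, COW fork
 * extents (as unwritten, since dirty page cache may back them), and holes
 * capped at the next data or COW extent.
 */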
static int
xfs_seek_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	imap, cmap;
	int			error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
		/*
		 * If we found a data extent we are done.
		 */
		if (imap.br_startoff <= offset_fsb)
			goto done;
		data_fsb = imap.br_startoff;
	} else {
		/*
		 * Fake a hole until the end of the file.
		 */
		data_fsb = xfs_iomap_end_fsb(mp, offset, length);
	}

	/*
	 * If a COW fork extent covers the hole, report it - capped to the next
	 * data fork extent:
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
		cow_fsb = cmap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		if (data_fsb < cow_fsb + cmap.br_blockcount)
			end_fsb = min(end_fsb, data_fsb);
		xfs_trim_extent(&cmap, offset_fsb, end_fsb);
		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
		/*
		 * This is a COW extent, so we must probe the page cache
		 * because there could be dirty page cache being backed
		 * by this extent.
		 */
		iomap->type = IOMAP_UNWRITTEN;
		goto out_unlock;
	}

	/*
	 * Else report a hole, capped to the next found data or COW extent.
	 */
	if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
		imap.br_blockcount = cow_fsb - offset_fsb;
	else
		imap.br_blockcount = data_fsb - offset_fsb;
	imap.br_startoff = offset_fsb;
	imap.br_startblock = HOLESTARTBLOCK;
	imap.br_state = XFS_EXT_NORM;
done:
	xfs_trim_extent(&imap, offset_fsb, end_fsb);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_seek_iomap_ops = {
	.iomap_begin		= xfs_seek_iomap_begin,
};
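
/*
 * iomap_begin handler for the attribute fork, used to report extended
 * attribute extents (e.g. for FIEMAP's xattr mapping).
 */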
static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_afp->if_nextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	ASSERT(nimaps);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};