xfs_refcount_item.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
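
/* Zones for allocating the CUI and CUD log items. */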
kmem_zone_t	*xfs_cui_zone;
kmem_zone_t	*xfs_cud_zone;

static const struct xfs_item_ops xfs_cui_item_ops;
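
/* Convert a log item pointer back to the CUI that contains it. */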
static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cui_log_item, cui_item);
}
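
/*
 * Free the CUI: small items come from the zone, while items tracking more
 * than XFS_CUI_MAX_FAST_EXTENTS extents were heap allocated.
 */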
STATIC void
xfs_cui_item_free(
	struct xfs_cui_log_item	*cuip)
{
	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
		kmem_free(cuip);
	else
		kmem_cache_free(xfs_cui_zone, cuip);
}

/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
	struct xfs_cui_log_item	*cuip)
{
	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
	if (atomic_dec_and_test(&cuip->cui_refcount)) {
		xfs_trans_ail_delete(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_cui_item_free(cuip);
	}
}
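
/* Return the number of log iovecs and bytes needed to log the given CUI. */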
STATIC void
xfs_cui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the given CUI log
 * item. We use only 1 iovec, and we point that at the cui_log_format
 * structure embedded in the CUI item. It is at this point that we assert
 * that all of the extent slots in the CUI item have been filled.
 */
STATIC void
xfs_cui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&cuip->cui_next_extent) ==
			cuip->cui_format.cui_nextents);

	cuip->cui_format.cui_type = XFS_LI_CUI;
	cuip->cui_format.cui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
			xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}

/*
 * The unpin operation is the last place a CUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the CUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the CUI to either construct
 * and commit the CUD or drop the CUD's reference in the event of error. Simply
 * drop the log's CUI reference now that the log is done with it.
 */
STATIC void
xfs_cui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	xfs_cui_release(cuip);
}

/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_cui_release(CUI_ITEM(lip));
}

/*
 * Allocate and initialize a CUI item with the given number of extents.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_cui_log_item		*cuip;

	ASSERT(nextents > 0);
	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
		cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents), 0);
	else
		cuip = kmem_cache_zalloc(xfs_cui_zone,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
	cuip->cui_format.cui_nextents = nextents;
	cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
	atomic_set(&cuip->cui_next_extent, 0);
	atomic_set(&cuip->cui_refcount, 2);

	return cuip;
}
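
/* Convert a log item pointer back to the CUD that contains it. */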
static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cud_log_item, cud_item);
}
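
/* Return the number of log iovecs and bytes needed to log the given CUD. */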
STATIC void
xfs_cud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_cud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the given CUD log
 * item. We use only 1 iovec, and we point that at the cud_log_format
 * structure embedded in the CUD item; unlike the CUI, the CUD carries no
 * extent slots.
 */
STATIC void
xfs_cud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	cudp->cud_format.cud_type = XFS_LI_CUD;
	cudp->cud_format.cud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
			sizeof(struct xfs_cud_log_format));
}

/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);

	xfs_cui_release(cudp->cud_cuip);
	kmem_cache_free(xfs_cud_zone, cudp);
}

static const struct xfs_item_ops xfs_cud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_cud_item_size,
	.iop_format	= xfs_cud_item_format,
	.iop_release	= xfs_cud_item_release,
};
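
/*
 * Allocate a CUD log item, point it at the CUI being finished, and add it to
 * the given transaction.
 */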
static struct xfs_cud_log_item *
xfs_trans_get_cud(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip)
{
	struct xfs_cud_log_item		*cudp;

	cudp = kmem_cache_zalloc(xfs_cud_zone, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
			  &xfs_cud_item_ops);
	cudp->cud_cuip = cuip;
	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

	xfs_trans_add_item(tp, &cudp->cud_item);
	return cudp;
}

/*
 * Finish a refcount update and log it to the CUD. Note that the
 * transaction is marked dirty regardless of whether the refcount
 * update succeeds or fails to support the CUI/CUD lifecycle rules.
 */
static int
xfs_trans_log_finish_refcount_update(
	struct xfs_trans		*tp,
	struct xfs_cud_log_item		*cudp,
	enum xfs_refcount_intent_type	type,
	xfs_fsblock_t			startblock,
	xfs_extlen_t			blockcount,
	xfs_fsblock_t			*new_fsb,
	xfs_extlen_t			*new_len,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_refcount_finish_one(tp, type, startblock,
			blockcount, new_fsb, new_len, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the CUI and frees the CUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	return error;
}

/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_refcount_intent	*ra;
	struct xfs_refcount_intent	*rb;

	ra = container_of(a, struct xfs_refcount_intent, ri_list);
	rb = container_of(b, struct xfs_refcount_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
}

/* Set the phys extent flags for this refcount update. */
static void
xfs_trans_set_refcount_flags(
	struct xfs_phys_extent		*refc,
	enum xfs_refcount_intent_type	type)
{
	refc->pe_flags = 0;
	switch (type) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		refc->pe_flags |= type;
		break;
	default:
		ASSERT(0);
	}
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip,
	struct xfs_refcount_intent	*refc)
{
	uint				next_extent;
	struct xfs_phys_extent		*ext;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	ext = &cuip->cui_format.cui_extents[next_extent];
	ext->pe_startblock = refc->ri_startblock;
	ext->pe_len = refc->ri_blockcount;
	xfs_trans_set_refcount_flags(ext, refc->ri_type);
}
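
/*
 * Build a CUI covering all of the deferred refcount updates on the list,
 * sorting the items by AG first if the caller asks for it.
 */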
static struct xfs_log_item *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_cui_log_item		*cuip = xfs_cui_init(mp, count);
	struct xfs_refcount_intent	*refc;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &cuip->cui_item);
	if (sort)
		list_sort(mp, items, xfs_refcount_update_diff_items);
	list_for_each_entry(refc, items, ri_list)
		xfs_refcount_update_log_item(tp, cuip, refc);
	return &cuip->cui_item;
}

/* Get a CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*refc;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_aglen;
	int				error;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done),
			refc->ri_type, refc->ri_startblock, refc->ri_blockcount,
			&new_fsb, &new_aglen, state);

	/* Did we run out of reservation?  Requeue what we didn't finish. */
	if (!error && new_aglen > 0) {
		ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
		       refc->ri_type == XFS_REFCOUNT_DECREASE);
		refc->ri_startblock = new_fsb;
		refc->ri_blockcount = new_aglen;
		return -EAGAIN;
	}

	kmem_free(refc);
	return error;
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_cui_release(CUI_ITEM(intent));
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_refcount_intent	*refc;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	kmem_free(refc);
}

const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup = xfs_refcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
};
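
/*
 * Roughly speaking, the deferred-ops machinery drives the hooks above as
 * follows: xfs_defer_finish() logs a CUI for the pending updates via
 * ->create_intent, rolls the transaction, logs the matching CUD via
 * ->create_done, and then calls ->finish_item once per update, requeueing
 * any update that returns -EAGAIN.  On failure, ->abort_intent and
 * ->cancel_item unwind whatever remains.  (A summary of the xfs_defer
 * framework for orientation, not a contract.)
 */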

/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_cui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
	struct xfs_phys_extent		*refc;
	struct xfs_cud_log_item		*cudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_mountp;
	xfs_fsblock_t			startblock_fsb;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_len;
	unsigned int			refc_type;
	bool				op_ok;
	bool				requeue_only = false;
	enum xfs_refcount_intent_type	type;
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * CUI. If any are bad, then assume that all are bad and
	 * just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		refc = &cuip->cui_format.cui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				XFS_FSB_TO_DADDR(mp, refc->pe_startblock));
		switch (refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    refc->pe_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    refc->pe_len >= mp->m_sb.sb_agblocks ||
		    (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS))
			return -EFSCORRUPTED;
	}
	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction. All
	 * refcount updates manage reservation usage internally and
	 * dynamically by deferring work that won't fit in the
	 * transaction. Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update. However, we're in log recovery here, so we
	 * use the passed-in defer_ops to finish up any work that
	 * doesn't fit. We need to reserve enough blocks to handle a
	 * full btree split on either end of the refcount range.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	cudp = xfs_trans_get_cud(tp, cuip);

	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		refc = &cuip->cui_format.cui_extents[i];
		refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
		switch (refc_type) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			type = refc_type;
			break;
		default:
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		if (requeue_only) {
			new_fsb = refc->pe_startblock;
			new_len = refc->pe_len;
		} else
			error = xfs_trans_log_finish_refcount_update(tp, cudp,
					type, refc->pe_startblock, refc->pe_len,
					&new_fsb, &new_len, &rcur);
		if (error)
			goto abort_error;

		/* Requeue what we didn't finish. */
		if (new_len > 0) {
			irec.br_startblock = new_fsb;
			irec.br_blockcount = new_len;
			switch (type) {
			case XFS_REFCOUNT_INCREASE:
				xfs_refcount_increase_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_DECREASE:
				xfs_refcount_decrease_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_ALLOC_COW:
				xfs_refcount_alloc_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			case XFS_REFCOUNT_FREE_COW:
				xfs_refcount_free_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			default:
				ASSERT(0);
			}
			requeue_only = true;
		}
	}

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);

abort_error:
	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}
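
/* Does this log item match the intent id being searched for in recovery? */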
STATIC bool
xfs_cui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_cui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_cud_log_item		*cudp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_phys_extent		*extp;
	unsigned int			count;

	count = CUI_ITEM(intent)->cui_format.cui_nextents;
	extp = CUI_ITEM(intent)->cui_format.cui_extents;

	tp->t_flags |= XFS_TRANS_DIRTY;
	cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	cuip = xfs_cui_init(tp->t_mountp, count);
	memcpy(cuip->cui_format.cui_extents, extp, count * sizeof(*extp));
	atomic_set(&cuip->cui_next_extent, count);
	xfs_trans_add_item(tp, &cuip->cui_item);
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
	return &cuip->cui_item;
}

static const struct xfs_item_ops xfs_cui_item_ops = {
	.iop_size	= xfs_cui_item_size,
	.iop_format	= xfs_cui_item_format,
	.iop_unpin	= xfs_cui_item_unpin,
	.iop_release	= xfs_cui_item_release,
	.iop_recover	= xfs_cui_item_recover,
	.iop_match	= xfs_cui_item_match,
	.iop_relog	= xfs_cui_item_relog,
};

/*
 * Copy a CUI format buffer from the given buf, and into the destination
 * CUI format structure. The CUI/CUD items were designed not to need any
 * special alignment handling.
 */
static int
xfs_cui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_cui_log_format	*dst_cui_fmt)
{
	struct xfs_cui_log_format	*src_cui_fmt;
	uint				len;

	src_cui_fmt = buf->i_addr;
	len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);

	if (buf->i_len == len) {
		memcpy(dst_cui_fmt, src_cui_fmt, len);
		return 0;
	}
	XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
	return -EFSCORRUPTED;
}

/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;

	cui_formatp = item->ri_buf[0].i_addr;

	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
	error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
	if (error) {
		xfs_cui_item_free(cuip);
		return error;
	}
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
	xfs_cui_release(cuip);
	return 0;
}

const struct xlog_recover_item_ops xlog_cui_item_ops = {
	.item_type		= XFS_LI_CUI,
	.commit_pass2		= xlog_recover_cui_commit_pass2,
};

/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the CUD
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_cud_item_ops = {
	.item_type		= XFS_LI_CUD,
	.commit_pass2		= xlog_recover_cud_commit_pass2,
};