// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"

void
xfs_extent_busy_insert(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	struct xfs_extent_busy	*new;
	struct xfs_extent_busy	*busyp;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
	new->agno = agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);

	pag = xfs_perag_get(tp->t_mountp, new->agno);
	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}
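
/*
 * Illustrative sketch, not part of the original file: a transaction that has
 * just freed an AG extent could mark it busy so the space is not handed out
 * again before the free commits to the log.  The wrapper name and the choice
 * of XFS_EXTENT_BUSY_SKIP_DISCARD are hypothetical examples.
 */
static void __maybe_unused
xfs_extent_busy_insert_example(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	/*
	 * XFS_EXTENT_BUSY_SKIP_DISCARD keeps this extent out of the online
	 * discard pass when the busy list is cleared at log completion.
	 */
	xfs_extent_busy_insert(tp, agno, bno, len,
			XFS_EXTENT_BUSY_SKIP_DISCARD);
}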

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  xfs_extent_busy_search() takes the per-AG busy tree lock
 * itself, so the caller must not already hold it.  This function returns 0
 * for no overlapping busy extent, -1 for an overlapping but not exact busy
 * extent, and 1 for an exact match.  This is done so that a non-zero return
 * indicates an overlap that will require a synchronous transaction, while
 * still letting the caller distinguish between a partial and an exact match.
 */
int
xfs_extent_busy_search(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;
	struct xfs_extent_busy	*busyp;
	int			match = 0;

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);

	rbp = pag->pagb_tree.rb_node;

	/* find closest start bno overlap */
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
	return match;
}
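
/*
 * Illustrative sketch, not part of the original file: one way a caller might
 * collapse the tristate return of xfs_extent_busy_search() into a simple
 * "is any part of this range busy right now" check.  The helper name is
 * hypothetical.
 */
static bool __maybe_unused
xfs_extent_busy_overlaps_example(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	int	match = xfs_extent_busy_search(mp, agno, bno, len);

	/* 1 == exact match, -1 == partial overlap, 0 == no overlap */
	return match != 0;
}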

/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata) __releases(&pag->pagb_lock)
					  __acquires(&pag->pagb_lock)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded.  Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user allocation, we have
	 * no choice but to force the log and retry the search.
	 *
	 * Fortunately this does not happen during normal operation, but
	 * only if the filesystem is very low on space and has to dip into
	 * the AGFL for normal allocations.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * Case 1:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +---------+
		 *        fbno   fend
		 */

		/*
		 * We would have to split the busy extent to be able to track
		 * it correctly, which we cannot do because we would have to
		 * modify the list of busy extents attached to the transaction
		 * or CIL context, which is immutable.
		 *
		 * Force out the log to clear the busy extent and retry the
		 * search.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * Case 2:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +-----------------+
		 *    fbno           fend
		 *
		 * Case 3:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 4:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 5:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +-----------------------------------+
		 *    fbno                             fend
		 *
		 */

		/*
		 * The busy extent is fully covered by the extent we are
		 * allocating, and can simply be removed from the rbtree.
		 * However we cannot remove it from the immutable list
		 * tracking busy extents in the transaction or CIL context,
		 * so set the length to zero to mark it invalid.
		 *
		 * We also need to restart the busy extent search from the
		 * tree root, because erasing the node can rearrange the
		 * tree topology.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * Case 6:
		 *              bbno           bend
		 *              +BBBBBBBBBBBBBBBBB+
		 *              +---------+
		 *              fbno   fend
		 *
		 * Case 7:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +------------------+
		 *    fbno            fend
		 *
		 */
		busyp->bno = fend;
	} else if (bbno < fbno) {
		/*
		 * Case 8:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +-------------+
		 *        fbno       fend
		 *
		 * Case 9:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +----------------------+
		 *        fbno                fend
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}

/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */
void
xfs_extent_busy_reuse(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;

	ASSERT(flen > 0);

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
						  userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}
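
/*
 * Illustrative sketch, not part of the original file: a metadata allocation
 * (for example an AGFL or btree block) that wants to reuse a range it just
 * picked can clear busy overlaps in place by passing userdata == false; for
 * user data the same call falls back to a synchronous log force.  The
 * wrapper name is hypothetical.
 */
static void __maybe_unused
xfs_extent_busy_reuse_metadata_example(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	/* Trim or invalidate any busy overlaps so this range is reusable. */
	xfs_extent_busy_reuse(mp, agno, bno, len, false);
}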

/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If *len is smaller than
 * args->minlen no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 *
 * Return the current busy generation for the AG if the extent is busy.  This
 * value can be used to wait for at least one of the currently busy extents
 * to be cleared.  Note that the busy list is not guaranteed to be empty after
 * the gen is woken.  The state of a specific extent must always be confirmed
 * with another call to xfs_extent_busy_trim() before it can be used.
 */
bool
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	unsigned		*busy_gen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;
	bool			ret = false;

	ASSERT(*len > 0);

	spin_lock(&args->pag->pagb_lock);
restart:
	fbno = *bno;
	flen = *len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		/*
		 * If this is a metadata allocation, try to reuse the busy
		 * extent instead of trimming the allocation.
		 */
		if (!(args->datatype & XFS_ALLOC_USERDATA) &&
		    !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
			if (!xfs_extent_busy_update_extent(args->mp, args->pag,
							  busyp, fbno, flen,
							  false))
				goto restart;
			continue;
		}

		if (bbno <= fbno) {
			/* start overlap */

			/*
			 * Case 1:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +---------+
			 *        fbno   fend
			 *
			 * Case 2:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-------------+
			 *    fbno       fend
			 *
			 * Case 3:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +-------------+
			 *        fbno       fend
			 *
			 * Case 4:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-----------------+
			 *    fbno           fend
			 *
			 * No unbusy region in extent, return failure.
			 */
			if (fend <= bend)
				goto fail;

			/*
			 * Case 5:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +----------------------+
			 *        fbno                fend
			 *
			 * Case 6:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *                       +-------+
			 *                       fbno  fend
			 */
			fbno = bend;
		} else if (bend >= fend) {
			/* end overlap */

			/*
			 * Case 7:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +------------------+
			 *    fbno            fend
			 *
			 * Case 8:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *    +-------+
			 *    fbno  fend
			 */
			fend = bbno;
		} else {
			/* middle overlap */

			/*
			 * Case 9:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +-----------------------------------+
			 *    fbno                             fend
			 *
			 * Can be trimmed to:
			 *    +-------+        OR         +-------+
			 *    fbno  fend                  fbno  fend
			 *
			 * Backward allocation leads to significant
			 * fragmentation of directories, which degrades
			 * directory performance, therefore we always want to
			 * choose the option that produces forward allocation
			 * patterns.
			 * Preferring the lower bno extent will make the next
			 * request use "fend" as the start of the next
			 * allocation; if the segment is no longer busy at
			 * that point, we'll get a contiguous allocation, but
			 * even if it is still busy, we will get a forward
			 * allocation.
			 * We try to avoid choosing the segment at "bend",
			 * because that can lead to the next allocation
			 * taking the segment at "fbno", which would be a
			 * backward allocation.  We only use the segment at
			 * "fbno" if it is much larger than the current
			 * requested size, because in that case there's a
			 * good chance subsequent allocations will be
			 * contiguous.
			 */
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfectly */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
out:

	if (fbno != *bno || flen != *len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
					  fbno, flen);
		*bno = fbno;
		*len = flen;
		*busy_gen = args->pag->pagb_gen;
		ret = true;
	}
	spin_unlock(&args->pag->pagb_lock);
	return ret;
fail:
	/*
	 * Return a zero extent length as a failure indication.  All callers
	 * re-check if the trimmed extent satisfies the minlen requirement.
	 */
	flen = 0;
	goto out;
}
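
/*
 * Illustrative sketch, not part of the original file: a simplified caller
 * loop around xfs_extent_busy_trim().  If the trimmed extent is too small,
 * wait on the busy generation reported by the trim and retry with the
 * original range.  The helper name and retry policy are hypothetical; the
 * real allocation paths in xfs_alloc.c implement a more involved version.
 */
static bool __maybe_unused
xfs_extent_busy_trim_retry_example(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		orig_bno,
	xfs_extlen_t		orig_len,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len)
{
	unsigned		busy_gen;

	*bno = orig_bno;
	*len = orig_len;

	/* A true return means the range was busy and has been trimmed. */
	while (xfs_extent_busy_trim(args, bno, len, &busy_gen)) {
		if (*len >= args->minlen)
			return true;	/* usable unbusy subset found */

		/*
		 * No large enough unbusy subset: force the log, wait for at
		 * least one busy extent to be cleared, then retry with the
		 * original range.
		 */
		xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
		*bno = orig_bno;
		*len = orig_len;
	}

	/* The range was not busy (or not trimmed); use it as is. */
	return *len >= args->minlen;
}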

STATIC void
xfs_extent_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp)
{
	if (busyp->length) {
		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
						busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kmem_free(busyp);
}

static void
xfs_extent_busy_put_pag(
	struct xfs_perag	*pag,
	bool			wakeup)
		__releases(pag->pagb_lock)
{
	if (wakeup) {
		pag->pagb_gen++;
		wake_up_all(&pag->pagb_wait);
	}

	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set, skip extents that need to be discarded, and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_extent_busy	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;
	bool			wakeup = false;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag)
				xfs_extent_busy_put_pag(pag, wakeup);
			agno = busyp->agno;
			pag = xfs_perag_get(mp, agno);
			spin_lock(&pag->pagb_lock);
			wakeup = false;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
		} else {
			xfs_extent_busy_clear_one(mp, pag, busyp);
			wakeup = true;
		}
	}

	if (pag)
		xfs_extent_busy_put_pag(pag, wakeup);
}

/*
 * Flush out all busy extents for this AG.
 */
void
xfs_extent_busy_flush(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	unsigned		busy_gen)
{
	DEFINE_WAIT(wait);
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return;

	do {
		prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
		if (busy_gen != READ_ONCE(pag->pagb_gen))
			break;
		schedule();
	} while (1);

	finish_wait(&pag->pagb_wait, &wait);
}

void
xfs_extent_busy_wait_all(
	struct xfs_mount	*mp)
{
	DEFINE_WAIT(wait);
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		struct xfs_perag *pag = xfs_perag_get(mp, agno);

		do {
			prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
			if (RB_EMPTY_ROOT(&pag->pagb_tree))
				break;
			schedule();
		} while (1);
		finish_wait(&pag->pagb_wait, &wait);

		xfs_perag_put(pag);
	}
}

/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */
int
xfs_extent_busy_ag_cmp(
	void			*priv,
	struct list_head	*l1,
	struct list_head	*l2)
{
	struct xfs_extent_busy	*b1 =
		container_of(l1, struct xfs_extent_busy, list);
	struct xfs_extent_busy	*b2 =
		container_of(l2, struct xfs_extent_busy, list);
	s32 diff;

	diff = b1->agno - b2->agno;
	if (!diff)
		diff = b1->bno - b2->bno;
	return diff;
}
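
/*
 * Illustrative sketch, not part of the original file: sorting a busy extent
 * list by AG with the comparator above, so that a subsequent walk such as
 * xfs_extent_busy_clear() only takes each per-AG lock once.  list_sort()
 * comes from <linux/list_sort.h>; the list head here stands in for a
 * transaction or CIL context busy list, and the helper name is hypothetical.
 */
static void __maybe_unused
xfs_extent_busy_sort_and_clear_example(
	struct xfs_mount	*mp,
	struct list_head	*busy_list)
{
	/* Group entries by AG (and by bno within an AG). */
	list_sort(NULL, busy_list, xfs_extent_busy_ag_cmp);

	/* Release the busy extents without issuing discards. */
	xfs_extent_busy_clear(mp, busy_list, false);
}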