xfs_pwork.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2019 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_sysctl.h"
#include "xfs_pwork.h"
#include <linux/nmi.h>

/*
 * Parallel Work Queue
 * ===================
 *
 * Abstract away the details of running a large and "obviously" parallelizable
 * task across multiple CPUs. Callers initialize the pwork control object with
 * a desired level of parallelization and a work function. Next, they embed
 * struct xfs_pwork in whatever structure they use to pass work context to a
 * worker thread and queue that pwork. The work function will be passed the
 * pwork item when it is run (from process context) and any returned error will
 * be recorded in xfs_pwork_ctl.error. Work functions should check for errors
 * and abort if necessary; the non-zeroness of xfs_pwork_ctl.error does not
 * stop workqueue item processing.
 *
 * This is the rough equivalent of the xfsprogs workqueue code, though we can't
 * reuse that name here.
 */
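
/*
 * A minimal usage sketch, for illustration only: the names foo_work,
 * foo_work_fn, and foo_scan_ag below are invented for this comment and do
 * not exist in XFS; the sketch also assumes the xfs_pwork_want_abort()
 * helper declared in xfs_pwork.h. A caller embeds struct xfs_pwork in its
 * own work item, queues one item per unit of work, and then tears down the
 * control structure, which waits for every item to finish (variable
 * declarations omitted for brevity):
 *
 *	struct foo_work {
 *		struct xfs_pwork	pwork;	(must be embedded)
 *		xfs_agnumber_t		agno;	(caller's work context)
 *	};
 *
 *	static int
 *	foo_work_fn(
 *		struct xfs_mount	*mp,
 *		struct xfs_pwork	*pwork)
 *	{
 *		struct foo_work		*fw;
 *		int			error = 0;
 *
 *		fw = container_of(pwork, struct foo_work, pwork);
 *		if (!xfs_pwork_want_abort(pwork))
 *			error = foo_scan_ag(mp, fw->agno);
 *		kmem_free(fw);
 *		return error;
 *	}
 *
 *	error = xfs_pwork_init(mp, &pctl, foo_work_fn, "foo_scan",
 *			xfs_pwork_guess_datadev_parallelism(mp));
 *	if (error)
 *		return error;
 *	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 *		fw = kmem_zalloc(sizeof(*fw), KM_SLEEP);
 *		fw->agno = agno;
 *		xfs_pwork_queue(&pctl, &fw->pwork);
 *	}
 *	error = xfs_pwork_destroy(&pctl);
 *
 * Note that in this sketch each work function frees its own work item.
 */
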
/* Invoke our caller's function. */
static void
xfs_pwork_work(
	struct work_struct	*work)
{
	struct xfs_pwork	*pwork;
	struct xfs_pwork_ctl	*pctl;
	int			error;

	pwork = container_of(work, struct xfs_pwork, work);
	pctl = pwork->pctl;
	error = pctl->work_fn(pctl->mp, pwork);
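	/* Record only the first error reported; later errors are dropped. */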
	if (error && !pctl->error)
		pctl->error = error;
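	/* If that was the last outstanding item, wake xfs_pwork_poll() waiters. */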
	if (atomic_dec_and_test(&pctl->nr_work))
		wake_up(&pctl->poll_wait);
}

/*
 * Set up control data for parallel work. @work_fn is the function that will
 * be called. @tag will be written into the kernel threads. @nr_threads is
 * the level of parallelism desired, or 0 for no limit.
 */
int
xfs_pwork_init(
	struct xfs_mount	*mp,
	struct xfs_pwork_ctl	*pctl,
	xfs_pwork_work_fn	work_fn,
	const char		*tag,
	unsigned int		nr_threads)
{
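	/*
	 * In DEBUG kernels, let the xfs_globals.pwork_threads knob override
	 * the caller's requested level of parallelism.
	 */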
#ifdef DEBUG
	if (xfs_globals.pwork_threads >= 0)
		nr_threads = xfs_globals.pwork_threads;
#endif
	trace_xfs_pwork_init(mp, nr_threads, current->pid);

	pctl->wq = alloc_workqueue("%s-%d", WQ_FREEZABLE, nr_threads, tag,
			current->pid);
	if (!pctl->wq)
		return -ENOMEM;
	pctl->work_fn = work_fn;
	pctl->error = 0;
	pctl->mp = mp;
	atomic_set(&pctl->nr_work, 0);
	init_waitqueue_head(&pctl->poll_wait);

	return 0;
}

/* Queue some parallel work. */
void
xfs_pwork_queue(
	struct xfs_pwork_ctl	*pctl,
	struct xfs_pwork	*pwork)
{
	INIT_WORK(&pwork->work, xfs_pwork_work);
	pwork->pctl = pctl;
	atomic_inc(&pctl->nr_work);
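	/* Count the new item before queueing it so the worker's decrement pairs up. */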
	queue_work(pctl->wq, &pwork->work);
}

/* Wait for the work to finish and tear down the control structure. */
int
xfs_pwork_destroy(
	struct xfs_pwork_ctl	*pctl)
{
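	/* destroy_workqueue() flushes any still-pending work items first. */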
	destroy_workqueue(pctl->wq);
	pctl->wq = NULL;
	return pctl->error;
}

/*
 * Wait for the work to finish by polling completion status and touch the soft
 * lockup watchdog. This is for callers such as mount which hold locks.
 */
void
xfs_pwork_poll(
	struct xfs_pwork_ctl	*pctl)
{
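	/*
	 * Wake up about once per second (HZ jiffies) to pet the soft lockup
	 * watchdog until all outstanding work items have completed.
	 */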
	while (wait_event_timeout(pctl->poll_wait,
				atomic_read(&pctl->nr_work) == 0, HZ) == 0)
		touch_softlockup_watchdog();
}

/*
 * Return the amount of parallelism that the data device can handle, or 0 for
 * no limit.
 */
unsigned int
xfs_pwork_guess_datadev_parallelism(
	struct xfs_mount	*mp)
{
	struct xfs_buftarg	*btp = mp->m_ddev_targp;

	/*
	 * For now we'll go with the most conservative setting possible,
	 * which is two threads for an SSD and one thread everywhere else.
	 */
	return blk_queue_nonrot(btp->bt_bdev->bd_disk->queue) ? 2 : 1;
}