// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
#include <linux/fs_struct.h>
#include <linux/task_work.h>
#include <linux/blk-cgroup.h>
#include <linux/audit.h>
#include <linux/cpu.h>

#include "../kernel/sched/sched.h"
#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_FIXED	= 8,	/* static idle worker */
	IO_WORKER_F_BOUND	= 16,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
	IO_WQ_BIT_CANCEL	= 1,	/* cancel work on list */
	IO_WQ_BIT_ERROR		= 2,	/* error on setup */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct rcu_head rcu;
	struct mm_struct *mm;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
	const struct cred *cur_creds;
	const struct cred *saved_creds;
	struct files_struct *restore_files;
	struct nsproxy *restore_nsproxy;
	struct fs_struct *restore_fs;
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		raw_spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned long hash_map;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};

/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct task_struct *manager;
	struct user_struct *user;
	refcount_t refs;
	struct completion done;

	struct hlist_node cpuhp_node;

	refcount_t use_refs;
};

static enum cpuhp_state io_wq_online;
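
/*
 * Workers are reference counted: io_worker_get() takes a reference only if
 * the worker is still alive, and io_worker_release() drops it, waking the
 * worker task when the last reference goes away (see io_worker_exit()).
 */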
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		wake_up_process(worker->task);
}

/*
 * Note: drops the wqe->lock if returning true! The caller must re-acquire
 * the lock in that case. Some callers need to restart handling if this
 * happens, so we can't just re-acquire the lock on behalf of the caller.
 */
static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
{
	bool dropped_lock = false;

	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}

	if (current->files != worker->restore_files) {
		__acquire(&wqe->lock);
		raw_spin_unlock_irq(&wqe->lock);
		dropped_lock = true;

		task_lock(current);
		current->files = worker->restore_files;
		current->nsproxy = worker->restore_nsproxy;
		task_unlock(current);
	}

	if (current->fs != worker->restore_fs)
		current->fs = worker->restore_fs;

	/*
	 * If we have an active mm, we need to drop the wq lock before unusing
	 * it. If we do, return true and let the caller retry the idle loop.
	 */
	if (worker->mm) {
		if (!dropped_lock) {
			__acquire(&wqe->lock);
			raw_spin_unlock_irq(&wqe->lock);
			dropped_lock = true;
		}
		__set_current_state(TASK_RUNNING);
		kthread_unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

#ifdef CONFIG_BLK_CGROUP
	if (worker->blkcg_css) {
		kthread_associate_blkcg(NULL);
		worker->blkcg_css = NULL;
	}
#endif
	if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	return dropped_lock;
}
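
/*
 * Bound and unbound work are accounted separately; these helpers map a
 * work item or a worker to the matching io_wqe_acct bucket.
 */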
static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
						  struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}
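
/*
 * A worker is exiting: drop any transient references held against it, undo
 * the running/worker accounting, unlink it from the free and all lists, and
 * free it via RCU.
 */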
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	/*
	 * If we're not at zero, someone else is holding a brief reference
	 * to the worker. Wait for that to go away.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!refcount_dec_and_test(&worker->ref))
		schedule();
	__set_current_state(TASK_RUNNING);

	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	if (worker->flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);
	if (!(worker->flags & IO_WORKER_F_BOUND))
		atomic_dec(&wqe->wq->user->processes);
	worker->flags = 0;
	preempt_enable();

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	if (__io_worker_unuse(wqe, worker)) {
		__release(&wqe->lock);
		raw_spin_lock_irq(&wqe->lock);
	}
	acct->nr_workers--;
	raw_spin_unlock_irq(&wqe->lock);

	kfree_rcu(worker, rcu);
	if (refcount_dec_and_test(&wqe->wq->refs))
		complete(&wqe->wq->done);
}

static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;
	return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't set up with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}
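
/*
 * Track how many workers of this accounting class are currently runnable.
 * When the count drops to zero with work still queued, wake (or have the
 * manager create) another worker so the queue keeps making progress.
 */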
static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}
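
/*
 * One-time setup when a worker thread starts executing: mark it as an
 * io_worker, save the context to restore later, and account it as running.
 */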
static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
{
	allow_kernel_signal(SIGINT);

	current->flags |= PF_IO_WORKER;

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	worker->restore_files = current->files;
	worker->restore_nsproxy = current->nsproxy;
	worker->restore_fs = current->fs;
	io_wqe_inc_running(wqe, worker);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(wqe, worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
			atomic_dec(&wqe->wq->user->processes);
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
			atomic_inc(&wqe->wq->user->processes);
		}
		io_wqe_inc_running(wqe, worker);
	}
}

/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}

	return __io_worker_unuse(wqe, worker);
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}
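
/*
 * Pick the next runnable work item. Non-hashed work can always run; hashed
 * work only runs if no other item with the same hash is currently executing,
 * in which case the whole [work, tail] chain for that hash is spliced off
 * the list.
 */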
static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int hash;

	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&wqe->work_list, node, prev);
			return work;
		}

		/* hashed, can run if not already running */
		hash = io_get_work_hash(work);
		if (!(wqe->hash_map & BIT(hash))) {
			wqe->hash_map |= BIT(hash);
			/* all items with this hash lie in [work, tail] */
			tail = wqe->hash_tail[hash];
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&wqe->work_list, &tail->list, prev);
			return work;
		}
	}

	return NULL;
}
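
/*
 * Switch the worker kthread to the mm referenced by the work's identity,
 * cancelling the work if that mm can no longer be grabbed.
 */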
static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
{
	if (worker->mm) {
		kthread_unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

	if (mmget_not_zero(work->identity->mm)) {
		kthread_use_mm(work->identity->mm);
		worker->mm = work->identity->mm;
		return;
	}

	/* failed grabbing mm, ensure work gets cancelled */
	work->flags |= IO_WQ_WORK_CANCEL;
}

static inline void io_wq_switch_blkcg(struct io_worker *worker,
				      struct io_wq_work *work)
{
#ifdef CONFIG_BLK_CGROUP
	if (!(work->flags & IO_WQ_WORK_BLKCG))
		return;
	if (work->identity->blkcg_css != worker->blkcg_css) {
		kthread_associate_blkcg(work->identity->blkcg_css);
		worker->blkcg_css = work->identity->blkcg_css;
	}
#endif
}

static void io_wq_switch_creds(struct io_worker *worker,
			       struct io_wq_work *work)
{
	const struct cred *old_creds = override_creds(work->identity->creds);

	worker->cur_creds = work->identity->creds;
	if (worker->saved_creds)
		put_cred(old_creds); /* creds set by previous switch */
	else
		worker->saved_creds = old_creds;
}
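
/*
 * Take on the identity (files, fs, mm, creds, blkcg, RLIMIT_FSIZE, audit
 * info) of the task that originally submitted this work item.
 */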
static void io_impersonate_work(struct io_worker *worker,
				struct io_wq_work *work)
{
	if ((work->flags & IO_WQ_WORK_FILES) &&
	    current->files != work->identity->files) {
		task_lock(current);
		current->files = work->identity->files;
		current->nsproxy = work->identity->nsproxy;
		task_unlock(current);
		if (!work->identity->files) {
			/* failed grabbing files, ensure work gets cancelled */
			work->flags |= IO_WQ_WORK_CANCEL;
		}
	}
	if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs)
		current->fs = work->identity->fs;
	if ((work->flags & IO_WQ_WORK_MM) && work->identity->mm != worker->mm)
		io_wq_switch_mm(worker, work);
	if ((work->flags & IO_WQ_WORK_CREDS) &&
	    worker->cur_creds != work->identity->creds)
		io_wq_switch_creds(worker, work);
	if (work->flags & IO_WQ_WORK_FSIZE)
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->identity->fsize;
	else if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	io_wq_switch_blkcg(worker, work);
#ifdef CONFIG_AUDIT
	current->loginuid = work->identity->loginuid;
	current->sessionid = work->identity->sessionid;
#endif
}
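
/*
 * Set (or clear) the worker's current work item; the cancellation paths
 * inspect ->cur_work under worker->lock.
 */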
static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		/* flush pending signals before assigning new work */
		if (signal_pending(current))
			flush_signals(current);
		cond_resched();
	}

#ifdef CONFIG_AUDIT
	current->loginuid = KUIDT_INIT(AUDIT_UID_UNSET);
	current->sessionid = AUDIT_SID_UNSET;
#endif

	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
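
/*
 * Main work-processing loop for a worker: pull items off the queue and run
 * them, including any dependent (linked) work they produce.
 */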
static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		raw_spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *old_work, *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);
			io_impersonate_work(worker, work);
			/*
			 * OK to set IO_WQ_WORK_CANCEL even for uncancellable
			 * work, the worker function will do the right thing.
			 */
			if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
				work->flags |= IO_WQ_WORK_CANCEL;

			old_work = work;
			linked = wq->do_work(work);

			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			wq->free_work(old_work);

			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				raw_spin_lock_irq(&wqe->lock);
				wqe->hash_map &= ~BIT_ULL(hash);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				raw_spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		raw_spin_lock_irq(&wqe->lock);
	} while (1);
}
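
/*
 * Thread function for each worker kthread: process work until the idle
 * timeout expires or the wq is being torn down.
 */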
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	io_worker_start(wqe, worker);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		set_current_state(TASK_INTERRUPTIBLE);
loop:
		raw_spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			__set_current_state(TASK_RUNNING);
			io_worker_handle_work(worker);
			goto loop;
		}
		/* drops the lock on success, retry */
		if (__io_worker_idle(wqe, worker)) {
			__release(&wqe->lock);
			goto loop;
		}
		raw_spin_unlock_irq(&wqe->lock);
		if (signal_pending(current))
			flush_signals(current);
		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
			continue;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		raw_spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			raw_spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(wqe, worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * set one up.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	raw_spin_lock_irq(&wqe->lock);
	io_wqe_dec_running(wqe, worker);
	raw_spin_unlock_irq(&wqe->lock);
}
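
/*
 * Create and start a new worker kthread for the given accounting class,
 * bound to the wqe's NUMA node.
 */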
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);

	worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
				"io_wqe_worker-%d/%d", index, wqe->node);
	if (IS_ERR(worker->task)) {
		kfree(worker);
		return false;
	}
	kthread_bind_mask(worker->task, cpumask_of_node(wqe->node));

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	raw_spin_unlock_irq(&wqe->lock);

	if (index == IO_WQ_ACCT_UNBOUND)
		atomic_inc(&wq->user->processes);

	refcount_inc(&wq->refs);
	wake_up_process(worker->task);
	return true;
}

static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}

static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
{
	send_sig(SIGINT, worker->task, 1);
	return false;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	wake_up_process(worker->task);
	return false;
}

/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	int node;

	/* create fixed workers */
	refcount_set(&wq->refs, 1);
	for_each_node(node) {
		if (!node_online(node))
			continue;
		if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
			continue;
		set_bit(IO_WQ_BIT_ERROR, &wq->state);
		set_bit(IO_WQ_BIT_EXIT, &wq->state);
		goto out;
	}

	complete(&wq->done);

	while (!kthread_should_stop()) {
		if (current->task_works)
			task_work_run();

		for_each_node(node) {
			struct io_wqe *wqe = wq->wqes[node];
			bool fork_worker[2] = { false, false };

			if (!node_online(node))
				continue;

			raw_spin_lock_irq(&wqe->lock);
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
				fork_worker[IO_WQ_ACCT_BOUND] = true;
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
			raw_spin_unlock_irq(&wqe->lock);
			if (fork_worker[IO_WQ_ACCT_BOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
			if (fork_worker[IO_WQ_ACCT_UNBOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (current->task_works)
		task_work_run();

out:
	if (refcount_dec_and_test(&wq->refs)) {
		complete(&wq->done);
		return 0;
	}
	/* if ERROR is set and we get here, we have workers to wake */
	if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
		rcu_read_lock();
		for_each_node(node)
			io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
		rcu_read_unlock();
	}
	return 0;
}

static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
			    struct io_wq_work *work)
{
	bool free_worker;

	if (!(work->flags & IO_WQ_WORK_UNBOUND))
		return true;
	if (atomic_read(&acct->nr_running))
		return true;

	rcu_read_lock();
	free_worker = !hlist_nulls_empty(&wqe->free_list);
	rcu_read_unlock();
	if (free_worker)
		return true;

	if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
	    !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
		return false;

	return true;
}

static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *old_work = work;

		work->flags |= IO_WQ_WORK_CANCEL;
		work = wq->do_work(work);
		wq->free_work(old_work);
	} while (work);
}
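
/*
 * Insert work into the pending list. Hashed work is chained behind the
 * current tail for its hash bucket so items with the same hash stay ordered.
 */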
static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &wqe->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	bool do_wake;
	unsigned long flags;

	/*
	 * Do early check to see if we need a new unbound worker, and if we do,
	 * if we're allowed to do so. This isn't 100% accurate as there's a
	 * gap between this check and incrementing the value, but that's OK.
	 * It's close enough to not be an issue, fork() has the same delay.
	 */
	if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
		io_run_cancel(work, wqe);
		return;
	}

	raw_spin_lock_irqsave(&wqe->lock, flags);
	io_wqe_insert_work(wqe, work);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	do_wake = (work->flags & IO_WQ_WORK_CONCURRENT) ||
			!atomic_read(&acct->nr_running);
	raw_spin_unlock_irqrestore(&wqe->lock, flags);

	if (do_wake)
		io_wqe_wake_worker(wqe, acct);
}
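
/* Enqueue work on the io_wqe local to the submitting CPU's NUMA node. */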
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
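
/*
 * Illustrative usage sketch (the real call sites live in the io_uring core,
 * not in this file, and "req" here is a hypothetical request): a submitter
 * that wants buffered writes to the same file serialized would hash the work
 * by inode before enqueueing it, e.g.
 *
 *	io_wq_hash_work(&req->work, file_inode(req->file));
 *	io_wq_enqueue(wq, &req->work);
 *
 * Items hashing to the same bucket are then executed one at a time.
 */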
void io_wq_cancel_all(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_CANCEL, &wq->state);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
	}
	rcu_read_unlock();
}

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
	    match->fn(worker->cur_work, match->data)) {
		send_sig(SIGINT, worker->task, 1);
		match->nr_running++;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}
	wq_list_del(&wqe->work_list, &work->list, prev);
}

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;

retry:
	raw_spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock_irqrestore(&wqe->lock, flags);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		if (!match->cancel_all)
			return;

		/* not safe to continue after unlock */
		goto retry;
	}
	raw_spin_unlock_irqrestore(&wqe->lock, flags);
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
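
/*
 * Set up an io_wq: allocate the per-node io_wqe structures, register the
 * CPU hotplug callback, and start the manager thread. "bounded" caps the
 * number of bound workers; unbound workers are capped by RLIMIT_NPROC.
 */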
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes)
		goto err_wq;

	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wqes;

	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	/* caller must already hold a reference to this */
	wq->user = data->user;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		if (wq->user) {
			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		}
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	init_completion(&wq->done);

	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
	if (!IS_ERR(wq->manager)) {
		wake_up_process(wq->manager);
		wait_for_completion(&wq->done);
		if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
			ret = -ENOMEM;
			goto err;
		}
		refcount_set(&wq->use_refs, 1);
		reinit_completion(&wq->done);
		return wq;
	}

	ret = PTR_ERR(wq->manager);
	complete(&wq->done);
err:
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node)
		kfree(wq->wqes[node]);
err_wqes:
	kfree(wq->wqes);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}

bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{
	if (data->free_work != wq->free_work || data->do_work != wq->do_work)
		return false;

	return refcount_inc_not_zero(&wq->use_refs);
}
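
/*
 * Tear down an io_wq once the last user reference is dropped: stop the
 * manager, wake all workers so they can exit, then wait and free everything.
 */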
static void __io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (wq->manager)
		kthread_stop(wq->manager);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	wait_for_completion(&wq->done);

	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
}

void io_wq_destroy(struct io_wq *wq)
{
	if (refcount_dec_and_test(&wq->use_refs))
		__io_wq_destroy(wq);
}

struct task_struct *io_wq_get_task(struct io_wq *wq)
{
	return wq->manager;
}
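
/*
 * CPU hotplug support: when a CPU comes online, re-apply each worker's
 * NUMA-node affinity mask (io_wq_worker_affinity does the per-worker update).
 */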
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct task_struct *task = worker->task;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(task, &rf);
	do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node));
	task->flags |= PF_NO_SETAFFINITY;
	task_rq_unlock(rq, task, &rf);
	return false;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
	rcu_read_unlock();
	return 0;
}
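
/* Register the CPU hotplug state used to fix up worker affinity on CPU online. */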
static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, NULL);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}

subsys_initcall(io_wq_init);