- // SPDX-License-Identifier: GPL-2.0
- /*
- * Basic worker thread pool for io_uring
- *
- * Copyright (C) 2019 Jens Axboe
- *
- */
- #include <linux/kernel.h>
- #include <linux/init.h>
- #include <linux/errno.h>
- #include <linux/sched/signal.h>
- #include <linux/mm.h>
- #include <linux/sched/mm.h>
- #include <linux/percpu.h>
- #include <linux/slab.h>
- #include <linux/kthread.h>
- #include <linux/rculist_nulls.h>
- #include <linux/fs_struct.h>
- #include <linux/task_work.h>
- #include <linux/blk-cgroup.h>
- #include <linux/audit.h>
- #include <linux/cpu.h>
- #include "../kernel/sched/sched.h"
- #include "io-wq.h"
- #define WORKER_IDLE_TIMEOUT (5 * HZ)
- enum {
- IO_WORKER_F_UP = 1, /* up and active */
- IO_WORKER_F_RUNNING = 2, /* account as running */
- IO_WORKER_F_FREE = 4, /* worker on free list */
- IO_WORKER_F_FIXED = 8, /* static idle worker */
- IO_WORKER_F_BOUND = 16, /* is doing bounded work */
- };
- enum {
- IO_WQ_BIT_EXIT = 0, /* wq exiting */
- IO_WQ_BIT_CANCEL = 1, /* cancel work on list */
- IO_WQ_BIT_ERROR = 2, /* error on setup */
- };
- enum {
- IO_WQE_FLAG_STALLED = 1, /* stalled on hash */
- };
- /*
- * One for each thread in a wqe pool
- */
- struct io_worker {
- refcount_t ref;
- unsigned flags;
- struct hlist_nulls_node nulls_node;
- struct list_head all_list;
- struct task_struct *task;
- struct io_wqe *wqe;
- struct io_wq_work *cur_work;
- spinlock_t lock;
- struct rcu_head rcu;
- struct mm_struct *mm;
- #ifdef CONFIG_BLK_CGROUP
- struct cgroup_subsys_state *blkcg_css;
- #endif
- const struct cred *cur_creds;
- const struct cred *saved_creds;
- struct files_struct *restore_files;
- struct nsproxy *restore_nsproxy;
- struct fs_struct *restore_fs;
- };
- #if BITS_PER_LONG == 64
- #define IO_WQ_HASH_ORDER 6
- #else
- #define IO_WQ_HASH_ORDER 5
- #endif
- #define IO_WQ_NR_HASH_BUCKETS (1u << IO_WQ_HASH_ORDER)
- struct io_wqe_acct {
- unsigned nr_workers;
- unsigned max_workers;
- atomic_t nr_running;
- };
- enum {
- IO_WQ_ACCT_BOUND,
- IO_WQ_ACCT_UNBOUND,
- };
- /*
- * Per-node worker thread pool
- */
- struct io_wqe {
- struct {
- raw_spinlock_t lock;
- struct io_wq_work_list work_list;
- unsigned long hash_map;
- unsigned flags;
- } ____cacheline_aligned_in_smp;
- int node;
- struct io_wqe_acct acct[2];
- struct hlist_nulls_head free_list;
- struct list_head all_list;
- struct io_wq *wq;
- struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
- };
- /*
- * Per io_wq state
- */
- struct io_wq {
- struct io_wqe **wqes;
- unsigned long state;
- free_work_fn *free_work;
- io_wq_work_fn *do_work;
- struct task_struct *manager;
- struct user_struct *user;
- refcount_t refs;
- struct completion done;
- struct hlist_node cpuhp_node;
- refcount_t use_refs;
- };
- static enum cpuhp_state io_wq_online;
- static bool io_worker_get(struct io_worker *worker)
- {
- return refcount_inc_not_zero(&worker->ref);
- }
- static void io_worker_release(struct io_worker *worker)
- {
- if (refcount_dec_and_test(&worker->ref))
- wake_up_process(worker->task);
- }
- /*
- * Note: drops the wqe->lock if returning true! The caller must re-acquire
- * the lock in that case. Some callers need to restart handling if this
- * happens, so we can't just re-acquire the lock on behalf of the caller.
- */
- static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
- {
- bool dropped_lock = false;
- if (worker->saved_creds) {
- revert_creds(worker->saved_creds);
- worker->cur_creds = worker->saved_creds = NULL;
- }
- if (current->files != worker->restore_files) {
- __acquire(&wqe->lock);
- raw_spin_unlock_irq(&wqe->lock);
- dropped_lock = true;
- task_lock(current);
- current->files = worker->restore_files;
- current->nsproxy = worker->restore_nsproxy;
- task_unlock(current);
- }
- if (current->fs != worker->restore_fs)
- current->fs = worker->restore_fs;
- /*
- * If we have an active mm, we need to drop the wq lock before unusing
- * it. If we do, return true and let the caller retry the idle loop.
- */
- if (worker->mm) {
- if (!dropped_lock) {
- __acquire(&wqe->lock);
- raw_spin_unlock_irq(&wqe->lock);
- dropped_lock = true;
- }
- __set_current_state(TASK_RUNNING);
- kthread_unuse_mm(worker->mm);
- mmput(worker->mm);
- worker->mm = NULL;
- }
- #ifdef CONFIG_BLK_CGROUP
- if (worker->blkcg_css) {
- kthread_associate_blkcg(NULL);
- worker->blkcg_css = NULL;
- }
- #endif
- if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
- return dropped_lock;
- }
- static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
- struct io_wq_work *work)
- {
- if (work->flags & IO_WQ_WORK_UNBOUND)
- return &wqe->acct[IO_WQ_ACCT_UNBOUND];
- return &wqe->acct[IO_WQ_ACCT_BOUND];
- }
- static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
- struct io_worker *worker)
- {
- if (worker->flags & IO_WORKER_F_BOUND)
- return &wqe->acct[IO_WQ_ACCT_BOUND];
- return &wqe->acct[IO_WQ_ACCT_UNBOUND];
- }
- static void io_worker_exit(struct io_worker *worker)
- {
- struct io_wqe *wqe = worker->wqe;
- struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
- /*
- * If we're not at zero, someone else is holding a brief reference
- * to the worker. Wait for that to go away.
- */
- set_current_state(TASK_INTERRUPTIBLE);
- if (!refcount_dec_and_test(&worker->ref))
- schedule();
- __set_current_state(TASK_RUNNING);
- preempt_disable();
- current->flags &= ~PF_IO_WORKER;
- if (worker->flags & IO_WORKER_F_RUNNING)
- atomic_dec(&acct->nr_running);
- if (!(worker->flags & IO_WORKER_F_BOUND))
- atomic_dec(&wqe->wq->user->processes);
- worker->flags = 0;
- preempt_enable();
- raw_spin_lock_irq(&wqe->lock);
- hlist_nulls_del_rcu(&worker->nulls_node);
- list_del_rcu(&worker->all_list);
- if (__io_worker_unuse(wqe, worker)) {
- __release(&wqe->lock);
- raw_spin_lock_irq(&wqe->lock);
- }
- acct->nr_workers--;
- raw_spin_unlock_irq(&wqe->lock);
- kfree_rcu(worker, rcu);
- if (refcount_dec_and_test(&wqe->wq->refs))
- complete(&wqe->wq->done);
- }
- static inline bool io_wqe_run_queue(struct io_wqe *wqe)
- __must_hold(wqe->lock)
- {
- if (!wq_list_empty(&wqe->work_list) &&
- !(wqe->flags & IO_WQE_FLAG_STALLED))
- return true;
- return false;
- }
- /*
- * Check head of free list for an available worker. If one isn't available,
- * caller must wake up the wq manager to create one.
- */
- static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
- __must_hold(RCU)
- {
- struct hlist_nulls_node *n;
- struct io_worker *worker;
- n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
- if (is_a_nulls(n))
- return false;
- worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
- if (io_worker_get(worker)) {
- wake_up_process(worker->task);
- io_worker_release(worker);
- return true;
- }
- return false;
- }
- /*
- * We need a worker. If we find a free one, we're good. If not, and we're
- * below the max number of workers, wake up the manager to create one.
- */
- static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
- {
- bool ret;
- /*
- * Most likely an attempt to queue unbounded work on an io_wq that
- * wasn't setup with any unbounded workers.
- */
- if (unlikely(!acct->max_workers))
- pr_warn_once("io-wq is not configured for unbound workers");
- rcu_read_lock();
- ret = io_wqe_activate_free_worker(wqe);
- rcu_read_unlock();
- if (!ret && acct->nr_workers < acct->max_workers)
- wake_up_process(wqe->wq->manager);
- }
- static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
- {
- struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
- atomic_inc(&acct->nr_running);
- }
- static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
- __must_hold(wqe->lock)
- {
- struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
- if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
- io_wqe_wake_worker(wqe, acct);
- }
- static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
- {
- allow_kernel_signal(SIGINT);
- current->flags |= PF_IO_WORKER;
- worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
- worker->restore_files = current->files;
- worker->restore_nsproxy = current->nsproxy;
- worker->restore_fs = current->fs;
- io_wqe_inc_running(wqe, worker);
- }
- /*
- * Worker will start processing some work. Move it to the busy list, if
- * it's currently on the freelist
- */
- static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
- struct io_wq_work *work)
- __must_hold(wqe->lock)
- {
- bool worker_bound, work_bound;
- if (worker->flags & IO_WORKER_F_FREE) {
- worker->flags &= ~IO_WORKER_F_FREE;
- hlist_nulls_del_init_rcu(&worker->nulls_node);
- }
- /*
- * If worker is moving from bound to unbound (or vice versa), then
- * ensure we update the running accounting.
- */
- worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
- work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
- if (worker_bound != work_bound) {
- io_wqe_dec_running(wqe, worker);
- if (work_bound) {
- worker->flags |= IO_WORKER_F_BOUND;
- wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
- wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
- atomic_dec(&wqe->wq->user->processes);
- } else {
- worker->flags &= ~IO_WORKER_F_BOUND;
- wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
- wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
- atomic_inc(&wqe->wq->user->processes);
- }
- io_wqe_inc_running(wqe, worker);
- }
- }
- /*
- * No work, worker going to sleep. Move to freelist, and unuse mm if we
- * have one attached. Dropping the mm may potentially sleep, so we drop
- * the lock in that case and return success. Since the caller has to
- * retry the loop in that case (we changed task state), we don't regrab
- * the lock if we return success.
- */
- static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
- __must_hold(wqe->lock)
- {
- if (!(worker->flags & IO_WORKER_F_FREE)) {
- worker->flags |= IO_WORKER_F_FREE;
- hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
- }
- return __io_worker_unuse(wqe, worker);
- }
- static inline unsigned int io_get_work_hash(struct io_wq_work *work)
- {
- return work->flags >> IO_WQ_HASH_SHIFT;
- }
- static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
- __must_hold(wqe->lock)
- {
- struct io_wq_work_node *node, *prev;
- struct io_wq_work *work, *tail;
- unsigned int hash;
- wq_list_for_each(node, prev, &wqe->work_list) {
- work = container_of(node, struct io_wq_work, list);
- /* not hashed, can run anytime */
- if (!io_wq_is_hashed(work)) {
- wq_list_del(&wqe->work_list, node, prev);
- return work;
- }
- /* hashed, can run if not already running */
- hash = io_get_work_hash(work);
- if (!(wqe->hash_map & BIT(hash))) {
- wqe->hash_map |= BIT(hash);
- /* all items with this hash lie in [work, tail] */
- tail = wqe->hash_tail[hash];
- wqe->hash_tail[hash] = NULL;
- wq_list_cut(&wqe->work_list, &tail->list, prev);
- return work;
- }
- }
- return NULL;
- }
- static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
- {
- if (worker->mm) {
- kthread_unuse_mm(worker->mm);
- mmput(worker->mm);
- worker->mm = NULL;
- }
- if (mmget_not_zero(work->identity->mm)) {
- kthread_use_mm(work->identity->mm);
- worker->mm = work->identity->mm;
- return;
- }
- /* failed grabbing mm, ensure work gets cancelled */
- work->flags |= IO_WQ_WORK_CANCEL;
- }
- static inline void io_wq_switch_blkcg(struct io_worker *worker,
- struct io_wq_work *work)
- {
- #ifdef CONFIG_BLK_CGROUP
- if (!(work->flags & IO_WQ_WORK_BLKCG))
- return;
- if (work->identity->blkcg_css != worker->blkcg_css) {
- kthread_associate_blkcg(work->identity->blkcg_css);
- worker->blkcg_css = work->identity->blkcg_css;
- }
- #endif
- }
- static void io_wq_switch_creds(struct io_worker *worker,
- struct io_wq_work *work)
- {
- const struct cred *old_creds = override_creds(work->identity->creds);
- worker->cur_creds = work->identity->creds;
- if (worker->saved_creds)
- put_cred(old_creds); /* creds set by previous switch */
- else
- worker->saved_creds = old_creds;
- }
- static void io_impersonate_work(struct io_worker *worker,
- struct io_wq_work *work)
- {
- if ((work->flags & IO_WQ_WORK_FILES) &&
- current->files != work->identity->files) {
- task_lock(current);
- current->files = work->identity->files;
- current->nsproxy = work->identity->nsproxy;
- task_unlock(current);
- if (!work->identity->files) {
- /* failed grabbing files, ensure work gets cancelled */
- work->flags |= IO_WQ_WORK_CANCEL;
- }
- }
- if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs)
- current->fs = work->identity->fs;
- if ((work->flags & IO_WQ_WORK_MM) && work->identity->mm != worker->mm)
- io_wq_switch_mm(worker, work);
- if ((work->flags & IO_WQ_WORK_CREDS) &&
- worker->cur_creds != work->identity->creds)
- io_wq_switch_creds(worker, work);
- if (work->flags & IO_WQ_WORK_FSIZE)
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->identity->fsize;
- else if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
- io_wq_switch_blkcg(worker, work);
- #ifdef CONFIG_AUDIT
- current->loginuid = work->identity->loginuid;
- current->sessionid = work->identity->sessionid;
- #endif
- }
- static void io_assign_current_work(struct io_worker *worker,
- struct io_wq_work *work)
- {
- if (work) {
- /* flush pending signals before assigning new work */
- if (signal_pending(current))
- flush_signals(current);
- cond_resched();
- }
- #ifdef CONFIG_AUDIT
- current->loginuid = KUIDT_INIT(AUDIT_UID_UNSET);
- current->sessionid = AUDIT_SID_UNSET;
- #endif
- spin_lock_irq(&worker->lock);
- worker->cur_work = work;
- spin_unlock_irq(&worker->lock);
- }
- static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
- static void io_worker_handle_work(struct io_worker *worker)
- __releases(wqe->lock)
- {
- struct io_wqe *wqe = worker->wqe;
- struct io_wq *wq = wqe->wq;
- do {
- struct io_wq_work *work;
- get_next:
- /*
- * If we got some work, mark us as busy. If we didn't, but
- * the list isn't empty, it means we stalled on hashed work.
- * Mark us stalled so we don't keep looking for work when we
- * can't make progress, any work completion or insertion will
- * clear the stalled flag.
- */
- work = io_get_next_work(wqe);
- if (work)
- __io_worker_busy(wqe, worker, work);
- else if (!wq_list_empty(&wqe->work_list))
- wqe->flags |= IO_WQE_FLAG_STALLED;
- raw_spin_unlock_irq(&wqe->lock);
- if (!work)
- break;
- io_assign_current_work(worker, work);
- /* handle a whole dependent link */
- do {
- struct io_wq_work *old_work, *next_hashed, *linked;
- unsigned int hash = io_get_work_hash(work);
- next_hashed = wq_next_work(work);
- io_impersonate_work(worker, work);
- /*
- * OK to set IO_WQ_WORK_CANCEL even for uncancellable
- * work, the worker function will do the right thing.
- */
- if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
- work->flags |= IO_WQ_WORK_CANCEL;
- old_work = work;
- linked = wq->do_work(work);
- work = next_hashed;
- if (!work && linked && !io_wq_is_hashed(linked)) {
- work = linked;
- linked = NULL;
- }
- io_assign_current_work(worker, work);
- wq->free_work(old_work);
- if (linked)
- io_wqe_enqueue(wqe, linked);
- if (hash != -1U && !next_hashed) {
- raw_spin_lock_irq(&wqe->lock);
- wqe->hash_map &= ~BIT_ULL(hash);
- wqe->flags &= ~IO_WQE_FLAG_STALLED;
- /* skip unnecessary unlock-lock wqe->lock */
- if (!work)
- goto get_next;
- raw_spin_unlock_irq(&wqe->lock);
- }
- } while (work);
- raw_spin_lock_irq(&wqe->lock);
- } while (1);
- }
- static int io_wqe_worker(void *data)
- {
- struct io_worker *worker = data;
- struct io_wqe *wqe = worker->wqe;
- struct io_wq *wq = wqe->wq;
- io_worker_start(wqe, worker);
- while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
- set_current_state(TASK_INTERRUPTIBLE);
- loop:
- raw_spin_lock_irq(&wqe->lock);
- if (io_wqe_run_queue(wqe)) {
- __set_current_state(TASK_RUNNING);
- io_worker_handle_work(worker);
- goto loop;
- }
- /* drops the lock on success, retry */
- if (__io_worker_idle(wqe, worker)) {
- __release(&wqe->lock);
- goto loop;
- }
- raw_spin_unlock_irq(&wqe->lock);
- if (signal_pending(current))
- flush_signals(current);
- if (schedule_timeout(WORKER_IDLE_TIMEOUT))
- continue;
- /* timed out, exit unless we're the fixed worker */
- if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
- !(worker->flags & IO_WORKER_F_FIXED))
- break;
- }
- if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
- raw_spin_lock_irq(&wqe->lock);
- if (!wq_list_empty(&wqe->work_list))
- io_worker_handle_work(worker);
- else
- raw_spin_unlock_irq(&wqe->lock);
- }
- io_worker_exit(worker);
- return 0;
- }
- /*
- * Called when a worker is scheduled in. Mark us as currently running.
- */
- void io_wq_worker_running(struct task_struct *tsk)
- {
- struct io_worker *worker = kthread_data(tsk);
- struct io_wqe *wqe = worker->wqe;
- if (!(worker->flags & IO_WORKER_F_UP))
- return;
- if (worker->flags & IO_WORKER_F_RUNNING)
- return;
- worker->flags |= IO_WORKER_F_RUNNING;
- io_wqe_inc_running(wqe, worker);
- }
- /*
- * Called when worker is going to sleep. If there are no workers currently
- * running and we have work pending, wake up a free one or have the manager
- * set one up.
- */
- void io_wq_worker_sleeping(struct task_struct *tsk)
- {
- struct io_worker *worker = kthread_data(tsk);
- struct io_wqe *wqe = worker->wqe;
- if (!(worker->flags & IO_WORKER_F_UP))
- return;
- if (!(worker->flags & IO_WORKER_F_RUNNING))
- return;
- worker->flags &= ~IO_WORKER_F_RUNNING;
- raw_spin_lock_irq(&wqe->lock);
- io_wqe_dec_running(wqe, worker);
- raw_spin_unlock_irq(&wqe->lock);
- }
- static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
- {
- struct io_wqe_acct *acct = &wqe->acct[index];
- struct io_worker *worker;
- worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
- if (!worker)
- return false;
- refcount_set(&worker->ref, 1);
- worker->nulls_node.pprev = NULL;
- worker->wqe = wqe;
- spin_lock_init(&worker->lock);
- worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
- "io_wqe_worker-%d/%d", index, wqe->node);
- if (IS_ERR(worker->task)) {
- kfree(worker);
- return false;
- }
- kthread_bind_mask(worker->task, cpumask_of_node(wqe->node));
- raw_spin_lock_irq(&wqe->lock);
- hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
- list_add_tail_rcu(&worker->all_list, &wqe->all_list);
- worker->flags |= IO_WORKER_F_FREE;
- if (index == IO_WQ_ACCT_BOUND)
- worker->flags |= IO_WORKER_F_BOUND;
- if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
- worker->flags |= IO_WORKER_F_FIXED;
- acct->nr_workers++;
- raw_spin_unlock_irq(&wqe->lock);
- if (index == IO_WQ_ACCT_UNBOUND)
- atomic_inc(&wq->user->processes);
- refcount_inc(&wq->refs);
- wake_up_process(worker->task);
- return true;
- }
- static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
- __must_hold(wqe->lock)
- {
- struct io_wqe_acct *acct = &wqe->acct[index];
- /* if we have available workers or no work, no need */
- if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
- return false;
- return acct->nr_workers < acct->max_workers;
- }
- static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
- {
- send_sig(SIGINT, worker->task, 1);
- return false;
- }
- /*
- * Iterate the passed in list and call the specific function for each
- * worker that isn't exiting
- */
- static bool io_wq_for_each_worker(struct io_wqe *wqe,
- bool (*func)(struct io_worker *, void *),
- void *data)
- {
- struct io_worker *worker;
- bool ret = false;
- list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
- if (io_worker_get(worker)) {
- /* no task if node is/was offline */
- if (worker->task)
- ret = func(worker, data);
- io_worker_release(worker);
- if (ret)
- break;
- }
- }
- return ret;
- }
- static bool io_wq_worker_wake(struct io_worker *worker, void *data)
- {
- wake_up_process(worker->task);
- return false;
- }
- /*
- * Manager thread. Tasked with creating new workers, if we need them.
- */
- static int io_wq_manager(void *data)
- {
- struct io_wq *wq = data;
- int node;
- /* create fixed workers */
- refcount_set(&wq->refs, 1);
- for_each_node(node) {
- if (!node_online(node))
- continue;
- if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
- continue;
- set_bit(IO_WQ_BIT_ERROR, &wq->state);
- set_bit(IO_WQ_BIT_EXIT, &wq->state);
- goto out;
- }
- complete(&wq->done);
- while (!kthread_should_stop()) {
- if (current->task_works)
- task_work_run();
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
- bool fork_worker[2] = { false, false };
- if (!node_online(node))
- continue;
- raw_spin_lock_irq(&wqe->lock);
- if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
- fork_worker[IO_WQ_ACCT_BOUND] = true;
- if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
- fork_worker[IO_WQ_ACCT_UNBOUND] = true;
- raw_spin_unlock_irq(&wqe->lock);
- if (fork_worker[IO_WQ_ACCT_BOUND])
- create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
- if (fork_worker[IO_WQ_ACCT_UNBOUND])
- create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
- }
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- }
- if (current->task_works)
- task_work_run();
- out:
- if (refcount_dec_and_test(&wq->refs)) {
- complete(&wq->done);
- return 0;
- }
- /* if ERROR is set and we get here, we have workers to wake */
- if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
- rcu_read_lock();
- for_each_node(node)
- io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
- rcu_read_unlock();
- }
- return 0;
- }
- static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
- struct io_wq_work *work)
- {
- bool free_worker;
- if (!(work->flags & IO_WQ_WORK_UNBOUND))
- return true;
- if (atomic_read(&acct->nr_running))
- return true;
- rcu_read_lock();
- free_worker = !hlist_nulls_empty(&wqe->free_list);
- rcu_read_unlock();
- if (free_worker)
- return true;
- if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
- !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
- return false;
- return true;
- }
- static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
- {
- struct io_wq *wq = wqe->wq;
- do {
- struct io_wq_work *old_work = work;
- work->flags |= IO_WQ_WORK_CANCEL;
- work = wq->do_work(work);
- wq->free_work(old_work);
- } while (work);
- }
- static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
- {
- unsigned int hash;
- struct io_wq_work *tail;
- if (!io_wq_is_hashed(work)) {
- append:
- wq_list_add_tail(&work->list, &wqe->work_list);
- return;
- }
- hash = io_get_work_hash(work);
- tail = wqe->hash_tail[hash];
- wqe->hash_tail[hash] = work;
- if (!tail)
- goto append;
- wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
- }
- static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
- {
- struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
- bool do_wake;
- unsigned long flags;
- /*
- * Do early check to see if we need a new unbound worker, and if we do,
- * if we're allowed to do so. This isn't 100% accurate as there's a
- * gap between this check and incrementing the value, but that's OK.
- * It's close enough to not be an issue, fork() has the same delay.
- */
- if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
- io_run_cancel(work, wqe);
- return;
- }
- raw_spin_lock_irqsave(&wqe->lock, flags);
- io_wqe_insert_work(wqe, work);
- wqe->flags &= ~IO_WQE_FLAG_STALLED;
- do_wake = (work->flags & IO_WQ_WORK_CONCURRENT) ||
- !atomic_read(&acct->nr_running);
- raw_spin_unlock_irqrestore(&wqe->lock, flags);
- if (do_wake)
- io_wqe_wake_worker(wqe, acct);
- }
- void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
- {
- struct io_wqe *wqe = wq->wqes[numa_node_id()];
- io_wqe_enqueue(wqe, work);
- }
- /*
- * Work items that hash to the same value will not be done in parallel.
- * Used to limit concurrent writes, generally hashed by inode.
- */
- void io_wq_hash_work(struct io_wq_work *work, void *val)
- {
- unsigned int bit;
- bit = hash_ptr(val, IO_WQ_HASH_ORDER);
- work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
- }
- void io_wq_cancel_all(struct io_wq *wq)
- {
- int node;
- set_bit(IO_WQ_BIT_CANCEL, &wq->state);
- rcu_read_lock();
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
- io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
- }
- rcu_read_unlock();
- }
- struct io_cb_cancel_data {
- work_cancel_fn *fn;
- void *data;
- int nr_running;
- int nr_pending;
- bool cancel_all;
- };
- static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
- {
- struct io_cb_cancel_data *match = data;
- unsigned long flags;
- /*
- * Hold the lock to avoid ->cur_work going out of scope, caller
- * may dereference the passed in work.
- */
- spin_lock_irqsave(&worker->lock, flags);
- if (worker->cur_work &&
- !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
- match->fn(worker->cur_work, match->data)) {
- send_sig(SIGINT, worker->task, 1);
- match->nr_running++;
- }
- spin_unlock_irqrestore(&worker->lock, flags);
- return match->nr_running && !match->cancel_all;
- }
- static inline void io_wqe_remove_pending(struct io_wqe *wqe,
- struct io_wq_work *work,
- struct io_wq_work_node *prev)
- {
- unsigned int hash = io_get_work_hash(work);
- struct io_wq_work *prev_work = NULL;
- if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
- if (prev)
- prev_work = container_of(prev, struct io_wq_work, list);
- if (prev_work && io_get_work_hash(prev_work) == hash)
- wqe->hash_tail[hash] = prev_work;
- else
- wqe->hash_tail[hash] = NULL;
- }
- wq_list_del(&wqe->work_list, &work->list, prev);
- }
- static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
- struct io_cb_cancel_data *match)
- {
- struct io_wq_work_node *node, *prev;
- struct io_wq_work *work;
- unsigned long flags;
- retry:
- raw_spin_lock_irqsave(&wqe->lock, flags);
- wq_list_for_each(node, prev, &wqe->work_list) {
- work = container_of(node, struct io_wq_work, list);
- if (!match->fn(work, match->data))
- continue;
- io_wqe_remove_pending(wqe, work, prev);
- raw_spin_unlock_irqrestore(&wqe->lock, flags);
- io_run_cancel(work, wqe);
- match->nr_pending++;
- if (!match->cancel_all)
- return;
- /* not safe to continue after unlock */
- goto retry;
- }
- raw_spin_unlock_irqrestore(&wqe->lock, flags);
- }
- static void io_wqe_cancel_running_work(struct io_wqe *wqe,
- struct io_cb_cancel_data *match)
- {
- rcu_read_lock();
- io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
- rcu_read_unlock();
- }
- enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
- void *data, bool cancel_all)
- {
- struct io_cb_cancel_data match = {
- .fn = cancel,
- .data = data,
- .cancel_all = cancel_all,
- };
- int node;
- /*
- * First check pending list, if we're lucky we can just remove it
- * from there. CANCEL_OK means that the work is returned as-new,
- * no completion will be posted for it.
- */
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
- io_wqe_cancel_pending_work(wqe, &match);
- if (match.nr_pending && !match.cancel_all)
- return IO_WQ_CANCEL_OK;
- }
- /*
- * Now check if a free (going busy) or busy worker has the work
- * currently running. If we find it there, we'll return CANCEL_RUNNING
- * as an indication that we attempt to signal cancellation. The
- * completion will run normally in this case.
- */
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
- io_wqe_cancel_running_work(wqe, &match);
- if (match.nr_running && !match.cancel_all)
- return IO_WQ_CANCEL_RUNNING;
- }
- if (match.nr_running)
- return IO_WQ_CANCEL_RUNNING;
- if (match.nr_pending)
- return IO_WQ_CANCEL_OK;
- return IO_WQ_CANCEL_NOTFOUND;
- }
- struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
- {
- int ret = -ENOMEM, node;
- struct io_wq *wq;
- if (WARN_ON_ONCE(!data->free_work || !data->do_work))
- return ERR_PTR(-EINVAL);
- if (WARN_ON_ONCE(!bounded))
- return ERR_PTR(-EINVAL);
- wq = kzalloc(sizeof(*wq), GFP_KERNEL);
- if (!wq)
- return ERR_PTR(-ENOMEM);
- wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
- if (!wq->wqes)
- goto err_wq;
- ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
- if (ret)
- goto err_wqes;
- wq->free_work = data->free_work;
- wq->do_work = data->do_work;
- /* caller must already hold a reference to this */
- wq->user = data->user;
- ret = -ENOMEM;
- for_each_node(node) {
- struct io_wqe *wqe;
- int alloc_node = node;
- if (!node_online(alloc_node))
- alloc_node = NUMA_NO_NODE;
- wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
- if (!wqe)
- goto err;
- wq->wqes[node] = wqe;
- wqe->node = alloc_node;
- wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
- atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
- if (wq->user) {
- wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
- task_rlimit(current, RLIMIT_NPROC);
- }
- atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
- wqe->wq = wq;
- raw_spin_lock_init(&wqe->lock);
- INIT_WQ_LIST(&wqe->work_list);
- INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
- INIT_LIST_HEAD(&wqe->all_list);
- }
- init_completion(&wq->done);
- wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
- if (!IS_ERR(wq->manager)) {
- wake_up_process(wq->manager);
- wait_for_completion(&wq->done);
- if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
- ret = -ENOMEM;
- goto err;
- }
- refcount_set(&wq->use_refs, 1);
- reinit_completion(&wq->done);
- return wq;
- }
- ret = PTR_ERR(wq->manager);
- complete(&wq->done);
- err:
- cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
- for_each_node(node)
- kfree(wq->wqes[node]);
- err_wqes:
- kfree(wq->wqes);
- err_wq:
- kfree(wq);
- return ERR_PTR(ret);
- }
- bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
- {
- if (data->free_work != wq->free_work || data->do_work != wq->do_work)
- return false;
- return refcount_inc_not_zero(&wq->use_refs);
- }
- static void __io_wq_destroy(struct io_wq *wq)
- {
- int node;
- cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
- set_bit(IO_WQ_BIT_EXIT, &wq->state);
- if (wq->manager)
- kthread_stop(wq->manager);
- rcu_read_lock();
- for_each_node(node)
- io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
- rcu_read_unlock();
- wait_for_completion(&wq->done);
- for_each_node(node)
- kfree(wq->wqes[node]);
- kfree(wq->wqes);
- kfree(wq);
- }
- void io_wq_destroy(struct io_wq *wq)
- {
- if (refcount_dec_and_test(&wq->use_refs))
- __io_wq_destroy(wq);
- }
- struct task_struct *io_wq_get_task(struct io_wq *wq)
- {
- return wq->manager;
- }
- static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
- {
- struct task_struct *task = worker->task;
- struct rq_flags rf;
- struct rq *rq;
- rq = task_rq_lock(task, &rf);
- do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node));
- task->flags |= PF_NO_SETAFFINITY;
- task_rq_unlock(rq, task, &rf);
- return false;
- }
- static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
- {
- struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
- int i;
- rcu_read_lock();
- for_each_node(i)
- io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
- rcu_read_unlock();
- return 0;
- }
- static __init int io_wq_init(void)
- {
- int ret;
- ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
- io_wq_cpu_online, NULL);
- if (ret < 0)
- return ret;
- io_wq_online = ret;
- return 0;
- }
- subsys_initcall(io_wq_init);