// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#include "slist.h"
#include "io_uring.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	struct io_wq_work *next_work;
	raw_spinlock_t lock;

	struct completion ref_done;

	unsigned long create_state;
	struct callback_head create_work;
	int create_index;

	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	raw_spinlock_t lock;
	struct io_wq_work_list work_list;
	unsigned long flags;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	raw_spinlock_t lock;
	struct io_wqe_acct acct[IO_WQ_ACCT_NR];

	int node;

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

	cpumask_var_t cpu_mask;
};

/*
 * Per io_wq state
 */
struct io_wq {
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;

	struct io_wqe *wqes[];
};

static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
static void io_wqe_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}

static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
{
	return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
}

static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}

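/*
 * A queued worker-creation task_work item is being cancelled: undo the
 * accounting that was charged up front and drop the references it held.
 */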
static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&worker->wqe->lock);
	acct->nr_workers--;
	raw_spin_unlock(&worker->wqe->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}

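/*
 * Final exit path for a worker: cancel any still-queued creation task_work,
 * drop the initial reference and wait for the remaining ones to go away,
 * unhook the worker from the wqe lists and terminate the thread.
 */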
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
						io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	io_worker_release(worker);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wqe->lock);
	if (worker->flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	raw_spin_unlock(&wqe->lock);
	io_wqe_dec_running(worker);
	worker->flags = 0;
	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	preempt_enable();

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wqe->wq);
	do_exit(0);
}

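/*
 * Returns true if this accounting class has pending work that a worker is
 * allowed to pick up, i.e. the list is non-empty and not stalled on a hash.
 */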
static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
{
	bool ret = false;

	raw_spin_lock(&acct->lock);
	if (!wq_list_empty(&acct->work_list) &&
	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
		ret = true;
	raw_spin_unlock(&acct->lock);

	return ret;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
					struct io_wqe_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wqe_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		if (wake_up_process(worker->task)) {
			io_worker_release(worker);
			return true;
		}
		io_worker_release(worker);
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wqe->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wqe->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wqe->wq->worker_refs);
	return create_io_worker(wqe->wq, wqe, acct->index);
}

static void io_wqe_inc_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	atomic_inc(&acct->nr_running);
}

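/*
 * task_work callback that performs a deferred worker creation queued by
 * io_queue_worker_create(). Re-checks the worker limit under wqe->lock and
 * either creates the new worker or rolls back the accounting.
 */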
static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;
	struct io_wqe *wqe;
	struct io_wqe_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wqe = worker->wqe;
	wq = wqe->wq;
	acct = &wqe->acct[worker->create_index];
	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wqe->lock);
	if (do_create) {
		create_io_worker(wq, wqe, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

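/*
 * Queue worker creation via task_work on the io_wq owner task, since a new
 * worker can't be created directly from the context that notices the need
 * (e.g. a worker about to go to sleep). Returns false if the creation could
 * not be queued, in which case the nr_running and worker ref accounting
 * charged by the caller has been undone.
 */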
static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wqe_acct *acct,
				   task_work_func_t func)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	atomic_inc(&wq->worker_refs);
	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
		/*
		 * EXIT may have been set after checking it above, check after
		 * adding the task_work and remove any creation item if it is
		 * now set. wq exit does that too, but we can have added this
		 * work item after we canceled in io_wq_exit_workers().
		 */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
			io_wq_cancel_tw_create(wq);
		io_worker_ref_put(wq);
		return true;
	}
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}

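/*
 * The last running worker of this accounting class is about to go to sleep.
 * If there is still pending work, queue a creation request so a new or
 * existing worker can pick it up.
 */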
static void io_wqe_dec_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (!atomic_dec_and_test(&acct->nr_running))
		return;
	if (!io_acct_run_queue(acct))
		return;

	atomic_inc(&acct->nr_running);
	atomic_inc(&wqe->wq->worker_refs);
	io_queue_worker_create(worker, acct, create_worker_cb);
}

/*
 * Worker is starting to process some work. Take it off the free list if
 * it's currently on there.
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		raw_spin_lock(&wqe->lock);
		hlist_nulls_del_init_rcu(&worker->nulls_node);
		raw_spin_unlock(&wqe->lock);
	}
}

/*
 * No work, worker going to sleep. Put it back on the free list if it isn't
 * already there.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

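/*
 * Register this wqe on the hash waitqueue so it gets woken when the hash
 * bit is cleared. Returns true if the hash turned out to be free already,
 * in which case the caller can retry immediately instead of sleeping.
 */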
static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
	struct io_wq *wq = wqe->wq;
	bool ret = false;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wqe->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wqe->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wqe->wait.entry);
			ret = true;
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
	return ret;
}

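/*
 * Pick the next runnable work item off the accounting list. Unhashed work
 * can run right away; hashed work only runs if no other worker currently
 * owns that hash. If everything left is hashed and busy, mark the acct as
 * stalled and wait for the owning hash bit to clear.
 */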
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
					   struct io_worker *worker)
	__must_hold(acct->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wqe *wqe = worker->wqe;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wqe->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		bool unstalled;

		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&acct->lock);
		unstalled = io_wait_on_hash(wqe, stall_hash);
		raw_spin_lock(&acct->lock);
		if (unstalled) {
			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
			if (wq_has_sleeper(&wqe->wq->hash->wait))
				wake_up(&wqe->wq->hash->wait);
		}
	}

	return NULL;
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_run_task_work();
		cond_resched();
	}

	raw_spin_lock(&worker->lock);
	worker->cur_work = work;
	worker->next_work = NULL;
	raw_spin_unlock(&worker->lock);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);

static void io_worker_handle_work(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;

		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		raw_spin_lock(&acct->lock);
		work = io_get_next_work(acct, worker);
		raw_spin_unlock(&acct->lock);
		if (work) {
			__io_worker_busy(wqe, worker);

			/*
			 * Make sure cancelation can find this, even before
			 * it becomes the active work. That avoids a window
			 * where the work has been removed from our general
			 * work list, but isn't yet discoverable as the
			 * current work item for this worker.
			 */
			raw_spin_lock(&worker->lock);
			worker->next_work = work;
			raw_spin_unlock(&worker->lock);
		} else {
			break;
		}
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				/* serialize hash clear with wake_up() */
				spin_lock_irq(&wq->hash->wait.lock);
				clear_bit(hash, &wq->hash->map);
				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
				spin_unlock_irq(&wq->hash->wait.lock);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
			}
		} while (work);
	} while (1);
}

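/*
 * Main loop for an io-wq worker thread: run pending work for its accounting
 * class, idle on the free list with a timeout when there is none, and exit
 * when the wq is being torn down or the idle timeout expires while other
 * workers for this class remain.
 */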
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool last_timeout = false;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
		while (io_acct_run_queue(acct))
			io_worker_handle_work(worker);

		raw_spin_lock(&wqe->lock);
		/* timed out, exit unless we're the last worker */
		if (last_timeout && acct->nr_workers > 1) {
			acct->nr_workers--;
			raw_spin_unlock(&wqe->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wqe, worker);
		raw_spin_unlock(&wqe->lock);
		if (io_run_task_work())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		last_timeout = !ret;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		io_worker_handle_work(worker);

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;
	io_wqe_dec_running(worker);
}

static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->worker_private = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
	tsk->flags |= PF_NO_SETAFFINITY;

	raw_spin_lock(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	raw_spin_unlock(&wqe->lock);
	wake_up_new_task(tsk);
}

static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static inline bool io_should_retry_thread(long err)
{
	/*
	 * Prevent perpetual task_work retry, if the task (or its group) is
	 * exiting.
	 */
	if (fatal_signal_pending(current))
		return false;

	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}

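/*
 * task_work continuation of a deferred worker creation attempt. Creates the
 * thread if possible; on a permanent failure it rolls back the accounting
 * and, if this was the last worker of the class, cancels all of its pending
 * work so nothing is left stranded.
 */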
static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wqe *wqe;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wqe = worker->wqe;
	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		struct io_wqe_acct *acct = io_wqe_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			raw_spin_unlock(&wqe->lock);
			while (io_acct_cancel_pending_work(wqe, acct, &match))
				;
		} else {
			raw_spin_unlock(&wqe->lock);
		}
		io_worker_ref_put(wqe->wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}

static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont))
		kfree(worker);
}

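/*
 * Allocate and start a new worker thread for the given accounting class.
 * If thread creation fails with a transient error, retry from workqueue
 * context; otherwise undo the accounting the caller charged.
 */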
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wqe->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wqe = wqe;
	raw_spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;

	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	__set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}

static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}

static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}

static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}

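/*
 * Queue work on this wqe. Inserts it into the matching accounting list and
 * wakes a free worker, creating a new one if none is available. If creating
 * the very first worker for the class fails, the work item is cancelled
 * rather than left queued with nobody to run it.
 */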
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	struct io_cb_cancel_data match;
	unsigned work_flags = work->flags;
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wqe);
		return;
	}

	raw_spin_lock(&acct->lock);
	io_wqe_insert_work(wqe, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
	raw_spin_unlock(&acct->lock);

	raw_spin_lock(&wqe->lock);
	rcu_read_lock();
	do_create = !io_wqe_activate_free_worker(wqe, acct);
	rcu_read_unlock();

	raw_spin_unlock(&wqe->lock);

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wqe_create_worker(wqe, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wqe->lock);
		if (acct->nr_workers) {
			raw_spin_unlock(&wqe->lock);
			return;
		}
		raw_spin_unlock(&wqe->lock);

		/* fatal condition, failed to create the first worker */
		match.fn		= io_wq_work_match_item,
		match.data		= work,
		match.cancel_all	= false,

		io_acct_cancel_pending_work(wqe, acct, &match);
	}
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}

static bool __io_wq_worker_cancel(struct io_worker *worker,
				  struct io_cb_cancel_data *match,
				  struct io_wq_work *work)
{
	if (work && match->fn(work, match->data)) {
		work->flags |= IO_WQ_WORK_CANCEL;
		__set_notify_signal(worker->task);
		return true;
	}

	return false;
}

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	raw_spin_lock(&worker->lock);
	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
	    __io_wq_worker_cancel(worker, match, worker->next_work))
		match->nr_running++;
	raw_spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}
	wq_list_del(&acct->work_list, &work->list, prev);
}

static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	raw_spin_lock(&acct->lock);
	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock(&acct->lock);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}
	raw_spin_unlock(&acct->lock);

	return false;
}

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	int i;
retry:
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);

		if (io_acct_cancel_pending_work(wqe, acct, match)) {
			if (match->cancel_all)
				goto retry;
			break;
		}
	}
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 *
	 * Then check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempted to signal cancellation. The
	 * completion will run normally in this case.
	 *
	 * Do both of these while holding the relevant lock (acct->lock for
	 * the pending list, wqe->lock for the running workers), to ensure
	 * that we'll find a work item regardless of state.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;

		raw_spin_lock(&wqe->lock);
		io_wqe_cancel_running_work(wqe, &match);
		raw_spin_unlock(&wqe->lock);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}

static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			    int sync, void *key)
{
	struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = &wqe->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wqe_activate_free_worker(wqe, acct);
	}
	rcu_read_unlock();
	return 1;
}

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, node, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wq;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
			goto err;
		cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		INIT_LIST_HEAD(&wqe->wait.entry);
		wqe->wait.func = io_wqe_hash_wake;
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			struct io_wqe_acct *acct = &wqe->acct[i];

			acct->index = i;
			atomic_set(&acct->nr_running, 0);
			INIT_WQ_LIST(&acct->work_list);
			raw_spin_lock_init(&acct->lock);
		}
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node) {
		if (!wq->wqes[node])
			continue;
		free_cpumask_var(wq->wqes[node]->cpu_mask);
		kfree(wq->wqes[node]);
	}
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}

static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wqe->wq == data;
}

void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}

static void io_wq_cancel_tw_create(struct io_wq *wq)
{
	struct callback_head *cb;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
		/*
		 * Only the worker continuation helper has worker allocated and
		 * hence needs freeing.
		 */
		if (cb->func == create_worker_cont)
			kfree(worker);
	}
}

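/*
 * Tear down all workers belonging to this io_wq: cancel queued creation
 * task_work, wake every worker so it notices the exit bit, then wait for
 * the last worker reference to be dropped before releasing the owner task.
 */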
static void io_wq_exit_workers(struct io_wq *wq)
{
	int node;

	if (!wq->task)
		return;

	io_wq_cancel_tw_create(wq);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
	}
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	for_each_node(node) {
		spin_lock_irq(&wq->hash->wait.lock);
		list_del_init(&wq->wqes[node]->wait.entry);
		spin_unlock_irq(&wq->hash->wait.lock);
	}
	put_task_struct(wq->task);
	wq->task = NULL;
}

static void io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_cb_cancel_data match = {
			.fn		= io_wq_work_match_all,
			.cancel_all	= true,
		};
		io_wqe_cancel_pending_work(wqe, &match);
		free_cpumask_var(wqe->cpu_mask);
		kfree(wqe);
	}
	io_wq_put_hash(wq->hash);
	kfree(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}

struct online_data {
	unsigned int cpu;
	bool online;
};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
	return false;
}

static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
	struct online_data od = {
		.cpu = cpu,
		.online = online
	};
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, false);
}

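/*
 * Apply a new CPU affinity mask to every wqe (and thus its workers). A NULL
 * mask resets each wqe back to the cpumask of its NUMA node.
 */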
int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
{
	int i;

	rcu_read_lock();
	for_each_node(i) {
		struct io_wqe *wqe = wq->wqes[i];

		if (mask)
			cpumask_copy(wqe->cpu_mask, mask);
		else
			cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
	}
	rcu_read_unlock();
	return 0;
}

/*
 * Set the max number of workers for each accounting class; a zero entry in
 * new_count leaves that class unchanged. The previous values are returned
 * in new_count.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	int prev[IO_WQ_ACCT_NR];
	bool first_node = true;
	int i, node;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		prev[i] = 0;

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_wqe_acct *acct;

		raw_spin_lock(&wqe->lock);
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			acct = &wqe->acct[i];
			if (first_node)
				prev[i] = max_t(int, acct->max_workers, prev[i]);
			if (new_count[i])
				acct->max_workers = new_count[i];
		}
		raw_spin_unlock(&wqe->lock);
		first_node = false;
	}
	rcu_read_unlock();

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];

	return 0;
}

static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}
subsys_initcall(io_wq_init);