// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>


static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        char *full_name;
        int (*threadfn)(void *data);
        void *data;
        int node;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion *done;

        struct list_head list;
};

struct kthread {
        unsigned long flags;
        unsigned int cpu;
        int result;
        int (*threadfn)(void *);
        void *data;
        struct completion parked;
        struct completion exited;
#ifdef CONFIG_BLK_CGROUP
        struct cgroup_subsys_state *blkcg_css;
#endif
        /* To store the full name if task comm is truncated. */
        char *full_name;
};

enum KTHREAD_BITS {
        KTHREAD_IS_PER_CPU = 0,
        KTHREAD_SHOULD_STOP,
        KTHREAD_SHOULD_PARK,
};

static inline struct kthread *to_kthread(struct task_struct *k)
{
        WARN_ON(!(k->flags & PF_KTHREAD));
        return k->worker_private;
}

/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * Per construction, when:
 *
 *        (p->flags & PF_KTHREAD) && p->worker_private
 *
 * the task is both a kthread and struct kthread is persistent. However,
 * PF_KTHREAD on its own is not: kernel_thread() can exec() (see umh.c and
 * begin_new_exec()).
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
        void *kthread = p->worker_private;

        if (kthread && !(p->flags & PF_KTHREAD))
                kthread = NULL;
        return kthread;
}

void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
        struct kthread *kthread = to_kthread(tsk);

        if (!kthread || !kthread->full_name) {
                __get_task_comm(buf, buf_size, tsk);
                return;
        }

        strscpy_pad(buf, kthread->full_name, buf_size);
}

bool set_kthread_struct(struct task_struct *p)
{
        struct kthread *kthread;

        if (WARN_ON_ONCE(to_kthread(p)))
                return false;

        kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
        if (!kthread)
                return false;

        init_completion(&kthread->exited);
        init_completion(&kthread->parked);
        p->vfork_done = &kthread->exited;

        p->worker_private = kthread;
        return true;
}

void free_kthread_struct(struct task_struct *k)
{
        struct kthread *kthread;

        /*
         * Can be NULL if kmalloc() in set_kthread_struct() failed.
         */
        kthread = to_kthread(k);
        if (!kthread)
                return;

#ifdef CONFIG_BLK_CGROUP
        WARN_ON_ONCE(kthread->blkcg_css);
#endif
        k->worker_private = NULL;
        kfree(kthread->full_name);
        kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true. You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
        return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
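
/*
 * Example (illustrative sketch; my_thread_fn and "my_thread" are
 * hypothetical names, not part of this file): a minimal stoppable
 * kthread paired with kthread_stop().
 *
 *        static int my_thread_fn(void *data)
 *        {
 *                while (!kthread_should_stop())
 *                        schedule_timeout_interruptible(HZ);
 *                return 0;
 *        }
 *
 *        struct task_struct *t = kthread_run(my_thread_fn, NULL, "my_thread");
 *        ...
 *        ret = kthread_stop(t);
 *
 * The return value of my_thread_fn() is handed back through kthread_stop().
 */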

static bool __kthread_should_park(struct task_struct *k)
{
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true. You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
        return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

bool kthread_should_stop_or_park(void)
{
        struct kthread *kthread = __to_kthread(current);

        if (!kthread)
                return false;

        return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
}

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary. This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
        bool frozen = false;

        might_sleep();

        if (unlikely(freezing(current)))
                frozen = __refrigerator(true);

        if (was_frozen)
                *was_frozen = frozen;

        return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
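
/*
 * Example (illustrative sketch; names are hypothetical): a freezable
 * kthread main loop built on kthread_freezable_should_stop().
 *
 *        static int my_freezable_fn(void *data)
 *        {
 *                bool was_frozen;
 *
 *                set_freezable();
 *                while (!kthread_freezable_should_stop(&was_frozen)) {
 *                        if (was_frozen)
 *                                resync_after_thaw();
 *                        schedule_timeout_interruptible(HZ);
 *                }
 *                return 0;
 *        }
 *
 * resync_after_thaw() stands in for whatever revalidation the caller
 * needs after %current was frozen and thawed.
 */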

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
        struct kthread *kthread = __to_kthread(task);

        if (kthread)
                return kthread->threadfn;
        return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task. Return the data value specified when it
 * was created if accessible. If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned. This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
        struct kthread *kthread = __to_kthread(task);
        void *data = NULL;

        if (kthread)
                copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
        return data;
}

static void __kthread_parkme(struct kthread *self)
{
        for (;;) {
                /*
                 * TASK_PARKED is a special state; we must serialize against
                 * possible pending wakeups to avoid store-store collisions on
                 * task->state.
                 *
                 * Such a collision might possibly result in the task state
                 * changing from TASK_PARKED and us failing the
                 * wait_task_inactive() in kthread_park().
                 */
                set_special_state(TASK_PARKED);
                if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
                        break;

                /*
                 * Thread is going to call schedule(), do not preempt it,
                 * or the caller of kthread_park() may spend more time in
                 * wait_task_inactive().
                 */
                preempt_disable();
                complete(&self->parked);
                schedule_preempt_disabled();
                preempt_enable();
        }
        __set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
        __kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
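
/*
 * Example (illustrative sketch; names are hypothetical): a thread
 * function that cooperates with kthread_park()/kthread_unpark() by
 * checking kthread_should_park() and parking itself.
 *
 *        static int my_parkable_fn(void *data)
 *        {
 *                while (!kthread_should_stop()) {
 *                        if (kthread_should_park())
 *                                kthread_parkme();
 *                        else
 *                                do_one_step(data);
 *                }
 *                return 0;
 *        }
 *
 * do_one_step() stands in for the caller's own work; the thread sits in
 * TASK_PARKED inside kthread_parkme() until kthread_unpark() is called.
 */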

/**
 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
 * @result: The integer value to return to kthread_stop().
 *
 * While kthread_exit() can be called directly, it exists so that
 * functions which do some additional work in non-modular code such as
 * module_put_and_kthread_exit() can be implemented.
 *
 * Does not return.
 */
void __noreturn kthread_exit(long result)
{
        struct kthread *kthread = to_kthread(current);

        kthread->result = result;
        do_exit(0);
}

/**
 * kthread_complete_and_exit - Exit the current kthread.
 * @comp: Completion to complete
 * @code: The integer value to return to kthread_stop().
 *
 * If present, complete @comp and then return @code to kthread_stop().
 *
 * A kernel thread whose module may be removed after the completion of
 * @comp can use this function to exit safely.
 *
 * Does not return.
 */
void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
{
        if (comp)
                complete(comp);

        kthread_exit(code);
}
EXPORT_SYMBOL(kthread_complete_and_exit);

static int kthread(void *_create)
{
        static const struct sched_param param = { .sched_priority = 0 };
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct completion *done;
        struct kthread *self;
        int ret;

        self = to_kthread(current);

        /* Release the structure when the caller was killed by a fatal signal. */
        done = xchg(&create->done, NULL);
        if (!done) {
                kfree(create->full_name);
                kfree(create);
                kthread_exit(-EINTR);
        }

        self->full_name = create->full_name;
        self->threadfn = threadfn;
        self->data = data;

        /*
         * The new thread inherited kthreadd's priority and CPU mask. Reset
         * back to default in case they have been changed.
         */
        sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
        set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));

        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        /*
         * Thread is going to call schedule(), do not preempt it,
         * or the creator may spend more time in wait_task_inactive().
         */
        preempt_disable();
        complete(done);
        schedule_preempt_disabled();
        preempt_enable();

        ret = -EINTR;
        if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
                cgroup_kthread_ready();
                __kthread_parkme(self);
                ret = threadfn(data);
        }
        kthread_exit(ret);
}

/* Called from kernel_clone() to get node information for the about-to-be-created task. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
        if (tsk == kthreadd_task)
                return tsk->pref_node_fork;
#endif
        return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

#ifdef CONFIG_NUMA
        current->pref_node_fork = create->node;
#endif
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, create->full_name,
                            CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                /* Release the structure when the caller was killed by a fatal signal. */
                struct completion *done = xchg(&create->done, NULL);

                kfree(create->full_name);
                if (!done) {
                        kfree(create);
                        return;
                }
                create->result = ERR_PTR(pid);
                complete(done);
        }
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
                                             void *data, int node,
                                             const char namefmt[],
                                             va_list args)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct task_struct *task;
        struct kthread_create_info *create = kmalloc(sizeof(*create),
                                                     GFP_KERNEL);

        if (!create)
                return ERR_PTR(-ENOMEM);
        create->threadfn = threadfn;
        create->data = data;
        create->node = node;
        create->done = &done;
        create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
        if (!create->full_name) {
                task = ERR_PTR(-ENOMEM);
                goto free_create;
        }

        spin_lock(&kthread_create_lock);
        list_add_tail(&create->list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        /*
         * Wait for completion in killable state, for I might be chosen by
         * the OOM killer while kthreadd is trying to allocate memory for
         * a new kernel thread.
         */
        if (unlikely(wait_for_completion_killable(&done))) {
                /*
                 * If I was killed by a fatal signal before kthreadd (or the
                 * new kernel thread) calls complete(), leave the cleanup of
                 * this structure to that thread.
                 */
                if (xchg(&create->done, NULL))
                        return ERR_PTR(-EINTR);
                /*
                 * kthreadd (or the new kernel thread) will call complete()
                 * shortly.
                 */
                wait_for_completion(&done);
        }
        task = create->result;
free_create:
        kfree(create);
        return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either return directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data, int node,
                                           const char namefmt[],
                                           ...)
{
        struct task_struct *task;
        va_list args;

        va_start(args, namefmt);
        task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
        va_end(args);

        return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
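
/*
 * Example (illustrative sketch; names are hypothetical): create a thread
 * on a specific NUMA node, then start it explicitly.
 *
 *        struct task_struct *t;
 *
 *        t = kthread_create_on_node(my_thread_fn, my_data, my_node,
 *                                   "worker/%d", id);
 *        if (IS_ERR(t))
 *                return PTR_ERR(t);
 *        wake_up_process(t);
 *
 * kthread_run() wraps exactly this create-then-wake sequence for callers
 * that do not care about the node.
 */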

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
{
        unsigned long flags;

        if (!wait_task_inactive(p, state)) {
                WARN_ON(1);
                return;
        }

        /* It's safe because the task is inactive. */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        do_set_cpus_allowed(p, mask);
        p->flags |= PF_NO_SETAFFINITY;
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
{
        __kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
        __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *           to "name.*%u". Code fills in the cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          void *data, unsigned int cpu,
                                          const char *namefmt)
{
        struct task_struct *p;

        p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
                                   cpu);
        if (IS_ERR(p))
                return p;
        kthread_bind(p, cpu);
        /* CPU hotplug needs to bind once again when unparking the thread. */
        to_kthread(p)->cpu = cpu;
        return p;
}
EXPORT_SYMBOL(kthread_create_on_cpu);
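
/*
 * Example (illustrative sketch; names are hypothetical): create a
 * CPU-bound thread; note the name format must contain a single %u,
 * which is filled with the cpu number.
 *
 *        struct task_struct *t;
 *
 *        t = kthread_create_on_cpu(my_percpu_fn, my_data, cpu, "helper/%u");
 *        if (!IS_ERR(t))
 *                wake_up_process(t);
 */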

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
        struct kthread *kthread = to_kthread(k);

        if (!kthread)
                return;

        WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

        if (cpu < 0) {
                clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
                return;
        }

        kthread->cpu = cpu;
        set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *p)
{
        struct kthread *kthread = __to_kthread(p);

        if (!kthread)
                return false;

        return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
        struct kthread *kthread = to_kthread(k);

        /*
         * A newly created kthread was parked when the CPU was offline.
         * The binding was lost and we need to set it again.
         */
        if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                __kthread_bind(k, kthread->cpu, TASK_PARKED);

        clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        /*
         * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
         */
        wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself, just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
        struct kthread *kthread = to_kthread(k);

        if (WARN_ON(k->flags & PF_EXITING))
                return -ENOSYS;

        if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
                return -EBUSY;

        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        if (k != current) {
                wake_up_process(k);
                /*
                 * Wait for __kthread_parkme() to complete(), this means we
                 * _will_ have TASK_PARKED and are about to call schedule().
                 */
                wait_for_completion(&kthread->parked);
                /*
                 * Now wait for that schedule() to complete and the task to
                 * get scheduled out.
                 */
                WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
        }

        return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
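
/*
 * Example (illustrative sketch): the caller-side pairing of
 * kthread_park() and kthread_unpark(), e.g. around an operation that
 * must not race with the thread.
 *
 *        if (!kthread_park(t)) {
 *                reconfigure_shared_state();
 *                kthread_unpark(t);
 *        }
 *
 * reconfigure_shared_state() is a placeholder; while parked, the thread
 * is guaranteed to be scheduled out in TASK_PARKED.
 */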

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call kthread_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread;
        int ret;

        trace_sched_kthread_stop(k);

        get_task_struct(k);
        kthread = to_kthread(k);
        set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
        kthread_unpark(k);
        set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
        wake_up_process(k);
        wait_for_completion(&kthread->exited);
        ret = kthread->result;
        put_task_struct(k);

        trace_sched_kthread_stop_ret(ret);
        return ret;
}
EXPORT_SYMBOL(kthread_stop);
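
/*
 * Example (illustrative sketch): when threadfn() may exit on its own via
 * kthread_exit(), pin the task_struct around kthread_stop() so it cannot
 * go away underneath us.
 *
 *        get_task_struct(t);
 *        ret = kthread_stop(t);
 *        put_task_struct(t);
 */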

int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
        set_mems_allowed(node_states[N_MEMORY]);

        current->flags |= PF_NOFREEZE;
        cgroup_init_kthreadd();

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
        memset(worker, 0, sizeof(struct kthread_worker));
        raw_spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * Work items must not hold any locks or leave preemption or interrupts
 * disabled when they finish. There is a safe point for freezing after one
 * work item finishes and before the next one is started.
 *
 * Also the work items must not be handled by more than one worker at the
 * same time, see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        /*
         * FIXME: Update the check and remove the assignment when all kthread
         * worker users are created using kthread_create_worker*() functions.
         */
        WARN_ON(worker->task && worker->task != current);
        worker->task = current;

        if (worker->flags & KTW_FREEZABLE)
                set_freezable();

repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                raw_spin_lock_irq(&worker->lock);
                worker->task = NULL;
                raw_spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        raw_spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
        raw_spin_unlock_irq(&worker->lock);

        if (work) {
                kthread_work_func_t func = work->func;

                __set_current_state(TASK_RUNNING);
                trace_sched_kthread_work_execute_start(work);
                work->func(work);
                /*
                 * Avoid dereferencing work after this point. The trace
                 * event only cares about the address.
                 */
                trace_sched_kthread_work_execute_end(work, func);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        cond_resched();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
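
/*
 * Example (illustrative sketch; names are hypothetical): the legacy way
 * of running a worker, with the kthread created by the caller; new code
 * should prefer kthread_create_worker() below.
 *
 *        struct kthread_worker my_worker;
 *        struct task_struct *t;
 *
 *        kthread_init_worker(&my_worker);
 *        t = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 */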

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
                        const char namefmt[], va_list args)
{
        struct kthread_worker *worker;
        struct task_struct *task;
        int node = NUMA_NO_NODE;

        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
        if (!worker)
                return ERR_PTR(-ENOMEM);

        kthread_init_worker(worker);

        if (cpu >= 0)
                node = cpu_to_node(cpu);

        task = __kthread_create_on_node(kthread_worker_fn, worker,
                                        node, namefmt, args);
        if (IS_ERR(task))
                goto fail_task;

        if (cpu >= 0)
                kthread_bind(task, cpu);

        worker->flags = flags;
        worker->task = task;
        wake_up_process(task);
        return worker;

fail_task:
        kfree(worker);
        return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
        struct kthread_worker *worker;
        va_list args;

        va_start(args, namefmt);
        worker = __kthread_create_worker(-1, flags, namefmt, args);
        va_end(args);

        return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
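
/*
 * Example (illustrative sketch; names are hypothetical): create a worker,
 * queue a work item, and tear everything down again.
 *
 *        static void my_work_fn(struct kthread_work *work)
 *        {
 *                ...
 *        }
 *
 *        struct kthread_worker *worker;
 *        struct kthread_work work;
 *
 *        worker = kthread_create_worker(0, "my_worker");
 *        if (IS_ERR(worker))
 *                return PTR_ERR(worker);
 *        kthread_init_work(&work, my_work_fn);
 *        kthread_queue_work(worker, &work);
 *        kthread_flush_work(&work);
 *        kthread_destroy_worker(worker);
 */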

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *      to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * CPU hotplug:
 * The kthread worker API is simple and generic. It just provides a way
 * to create, use, and destroy workers.
 *
 * It is up to the API user how to handle CPU hotplug. They have to decide
 * how to handle pending work items, prevent queuing new ones, and
 * restore the functionality when the CPU goes off and on. There are a
 * few catches:
 *
 *    - CPU affinity gets lost when the worker is scheduled on an offline CPU.
 *
 *    - The worker might not exist if the CPU was offline when the user
 *      created the workers.
 *
 * Good practice is to implement two CPU hotplug callbacks and to
 * destroy/create the worker when the CPU goes down/up.
 *
 * Return:
 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
                             const char namefmt[], ...)
{
        struct kthread_worker *worker;
        va_list args;

        va_start(args, namefmt);
        worker = __kthread_create_worker(cpu, flags, namefmt, args);
        va_end(args);

        return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
                                   struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);

        return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
                                             struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);
        WARN_ON_ONCE(!list_empty(&work->node));
        /* Do not use a work with >1 worker, see kthread_queue_work() */
        WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
                                struct kthread_work *work,
                                struct list_head *pos)
{
        kthread_insert_work_sanity_check(worker, work);

        trace_sched_kthread_work_queue_work(worker, work);

        list_add_tail(&work->node, pos);
        work->worker = worker;
        if (!worker->current_work && likely(worker->task))
                wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution. @task
 * must have been created with kthread_worker_create(). Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        raw_spin_lock_irqsave(&worker->lock, flags);
        if (!queuing_blocked(worker, work)) {
                kthread_insert_work(worker, work, &worker->work_list);
                ret = true;
        }
        raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *      delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should be called from an irq-safe timer with irqs already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
        struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
        struct kthread_work *work = &dwork->work;
        struct kthread_worker *worker = work->worker;
        unsigned long flags;

        /*
         * This might happen when a pending work is reinitialized.
         * It means that the work is being used the wrong way.
         */
        if (WARN_ON_ONCE(!worker))
                return;

        raw_spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        /* Move the work from worker->delayed_work_list. */
        WARN_ON_ONCE(list_empty(&work->node));
        list_del_init(&work->node);
        if (!work->canceling)
                kthread_insert_work(worker, work, &worker->work_list);

        raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
                                         struct kthread_delayed_work *dwork,
                                         unsigned long delay)
{
        struct timer_list *timer = &dwork->timer;
        struct kthread_work *work = &dwork->work;

        WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

        /*
         * If @delay is 0, queue @dwork->work immediately. This is for
         * both optimization and correctness. The earliest @timer can
         * expire is on the closest next tick and delayed_work users depend
         * on that there's no such delay when @delay is 0.
         */
        if (!delay) {
                kthread_insert_work(worker, work, &worker->work_list);
                return;
        }

        /* Be paranoid and try to detect possible races already now. */
        kthread_insert_work_sanity_check(worker, work);

        list_add(&work->node, &worker->delayed_work_list);
        work->worker = worker;
        timer->expires = jiffies + delay;
        add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *      after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work was already pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
                                struct kthread_delayed_work *dwork,
                                unsigned long delay)
{
        struct kthread_work *work = &dwork->work;
        unsigned long flags;
        bool ret = false;

        raw_spin_lock_irqsave(&worker->lock, flags);

        if (!queuing_blocked(worker, work)) {
                __kthread_queue_delayed_work(worker, dwork, delay);
                ret = true;
        }

        raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
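
/*
 * Example (illustrative sketch; names are hypothetical): arm a delayed
 * work item and make sure it is gone before the worker is destroyed.
 *
 *        struct kthread_delayed_work dwork;
 *
 *        kthread_init_delayed_work(&dwork, my_work_fn);
 *        kthread_queue_delayed_work(worker, &dwork, msecs_to_jiffies(100));
 *        ...
 *        kthread_cancel_delayed_work_sync(&dwork);
 */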

struct kthread_flush_work {
        struct kthread_work     work;
        struct completion       done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);

        complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
        struct kthread_worker *worker;
        bool noop = false;

        worker = work->worker;
        if (!worker)
                return;

        raw_spin_lock_irq(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        if (!list_empty(&work->node))
                kthread_insert_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
                kthread_insert_work(worker, &fwork.work,
                                    worker->work_list.next);
        else
                noop = true;

        raw_spin_unlock_irq(&worker->lock);

        if (!noop)
                wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * Make sure that the timer is neither set nor running and can no longer
 * manipulate the work list_head.
 *
 * The function is called under worker->lock. The lock is temporarily
 * released, but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
                                              unsigned long *flags)
{
        struct kthread_delayed_work *dwork =
                container_of(work, struct kthread_delayed_work, work);
        struct kthread_worker *worker = work->worker;

        /*
         * del_timer_sync() must be called to make sure that the timer
         * callback is not running. The lock must be temporarily
         * released to avoid a deadlock with the callback. In the meantime,
         * any queuing is blocked by setting the canceling counter.
         */
        work->canceling++;
        raw_spin_unlock_irqrestore(&worker->lock, *flags);
        del_timer_sync(&dwork->timer);
        raw_spin_lock_irqsave(&worker->lock, *flags);
        work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See
 * worker->current_work, which tracks the work item currently being
 * processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *      %false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
        /*
         * Try to remove the work from a worker list. It might either
         * be from worker->work_list or from worker->delayed_work_list.
         */
        if (!list_empty(&work->node)) {
                list_del_init(&work->node);
                return true;
        }

        return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %true here. The return value can be used for reference
 * counting and the number of queued works stays the same. Anyway, the caller
 * is supposed to synchronize these operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handlers.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
                              struct kthread_delayed_work *dwork,
                              unsigned long delay)
{
        struct kthread_work *work = &dwork->work;
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&worker->lock, flags);

        /* Do not bother with canceling when never queued. */
        if (!work->worker) {
                ret = false;
                goto fast_queue;
        }

        /* Work must not be used with >1 worker, see kthread_queue_work() */
        WARN_ON_ONCE(work->worker != worker);

        /*
         * Temporarily cancel the work, but do not fight with another command
         * that is canceling the work as well.
         *
         * It is a bit tricky because of possible races with another
         * mod_delayed_work() and cancel_delayed_work() callers.
         *
         * The timer must be canceled first because worker->lock is released
         * when doing so. But the work can be removed from the queue (list)
         * only when it can be queued again so that the return value can
         * be used for reference counting.
         */
        kthread_cancel_delayed_work_timer(work, &flags);
        if (work->canceling) {
                /* The number of works in the queue does not change. */
                ret = true;
                goto out;
        }
        ret = __kthread_cancel_work(work);

fast_queue:
        __kthread_queue_delayed_work(worker, dwork, delay);
out:
        raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
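
/*
 * Example (illustrative sketch): a watchdog-style pattern that keeps
 * pushing a timeout handler into the future; each call re-arms the
 * timer regardless of whether the work was already pending.
 *
 *        kthread_mod_delayed_work(worker, &timeout_dwork,
 *                                 msecs_to_jiffies(500));
 */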

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
        struct kthread_worker *worker = work->worker;
        unsigned long flags;
        int ret = false;

        if (!worker)
                goto out;

        raw_spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        if (is_dwork)
                kthread_cancel_delayed_work_timer(work, &flags);

        ret = __kthread_cancel_work(work);

        if (worker->current_work != work)
                goto out_fast;

        /*
         * The work is in progress and we need to wait with the lock released.
         * In the meantime, block any queuing by setting the canceling counter.
         */
        work->canceling++;
        raw_spin_unlock_irqrestore(&worker->lock, flags);
        kthread_flush_work(work);
        raw_spin_lock_irqsave(&worker->lock, flags);
        work->canceling--;

out_fast:
        raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
        return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish. This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
        return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *      wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
        return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        kthread_queue_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker. The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios. There are no multi-step state
 * machines needed.
 *
 * Note that this function is not responsible for handling delayed work, so
 * the caller should queue or cancel all delayed work items before invoking
 * this function.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
        struct task_struct *task;

        task = worker->task;
        if (WARN_ON(!task))
                return;

        kthread_flush_worker(worker);
        kthread_stop(task);
        WARN_ON(!list_empty(&worker->delayed_work_list));
        WARN_ON(!list_empty(&worker->work_list));
        kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
        struct mm_struct *active_mm;
        struct task_struct *tsk = current;

        WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
        WARN_ON_ONCE(tsk->mm);

        /*
         * It is possible for mm to be the same as tsk->active_mm, but
         * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
         * because these references are not equivalent.
         */
        mmgrab(mm);

        task_lock(tsk);
        /* Hold off tlb flush IPIs while switching mm's */
        local_irq_disable();
        active_mm = tsk->active_mm;
        tsk->active_mm = mm;
        tsk->mm = mm;
        membarrier_update_current_mm(mm);
        switch_mm_irqs_off(active_mm, mm, tsk);
        local_irq_enable();
        task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
        finish_arch_post_lock_switch();
#endif

        /*
         * When a kthread starts operating on an address space, the loop
         * in membarrier_{private,global}_expedited() may not observe
         * that tsk->mm, and not issue an IPI. Membarrier requires a
         * memory barrier after storing to tsk->mm, before accessing
         * user-space memory. A full memory barrier for membarrier
         * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
         * mmdrop_lazy_tlb().
         */
        mmdrop_lazy_tlb(active_mm);
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
        struct task_struct *tsk = current;

        WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
        WARN_ON_ONCE(!tsk->mm);

        task_lock(tsk);
        /*
         * When a kthread stops operating on an address space, the loop
         * in membarrier_{private,global}_expedited() may not observe
         * that tsk->mm, and not issue an IPI. Membarrier requires a
         * memory barrier after accessing user-space memory, before
         * clearing tsk->mm.
         */
        smp_mb__after_spinlock();
        sync_mm_rss(mm);
        local_irq_disable();
        tsk->mm = NULL;
        membarrier_update_current_mm(NULL);
        mmgrab_lazy_tlb(mm);
        /* active_mm is still 'mm' */
        enter_lazy_tlb(mm, tsk);
        local_irq_enable();
        task_unlock(tsk);

        mmdrop(mm);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);
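
/*
 * Example (illustrative sketch): a kthread temporarily adopting a user
 * address space, e.g. to service requests on behalf of a user process;
 * copy_{from,to}_user() operate on @mm between the two calls.
 *
 *        kthread_use_mm(mm);
 *        ret = copy_from_user(buf, user_ptr, len);
 *        kthread_unuse_mm(mm);
 */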

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * The current thread must be a kthread. The thread is running jobs on
 * behalf of other threads. In some cases, we expect the jobs to attach
 * the cgroup info of the original threads instead of that of the current
 * thread. This function stores the original thread's cgroup info in the
 * current kthread context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
        struct kthread *kthread;

        if (!(current->flags & PF_KTHREAD))
                return;
        kthread = to_kthread(current);
        if (!kthread)
                return;

        if (kthread->blkcg_css) {
                css_put(kthread->blkcg_css);
                kthread->blkcg_css = NULL;
        }
        if (css) {
                css_get(css);
                kthread->blkcg_css = css;
        }
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * The current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
        struct kthread *kthread;

        if (current->flags & PF_KTHREAD) {
                kthread = to_kthread(current);
                if (kthread)
                        return kthread->blkcg_css;
        }
        return NULL;
}
#endif