/*
 * linux/kernel/context.c
 *
 * Mechanism for running arbitrary tasks in process context
 *
 * dwmw2@redhat.com:		Genesis
 *
 * andrewm@uow.edu.au:		2.4.0-test12
 *	- Child reaping
 *	- Support for tasks which re-add themselves
 *	- flush_scheduled_tasks.
 */

#define __KERNEL_SYSCALLS__

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/signal.h>
#include <linux/completion.h>

static DECLARE_TASK_QUEUE(tq_context);			/* tasks awaiting keventd */
static DECLARE_WAIT_QUEUE_HEAD(context_task_wq);	/* keventd sleeps here */
static DECLARE_WAIT_QUEUE_HEAD(context_task_done);	/* flushers sleep here */
static int keventd_running;
static struct task_struct *keventd_task;

static int need_keventd(const char *who)
{
	if (keventd_running == 0)
		printk(KERN_ERR "%s(): keventd has not started\n", who);
	return keventd_running;
}

int current_is_keventd(void)
{
	int ret = 0;
	if (need_keventd(__FUNCTION__))
		ret = (current == keventd_task);
	return ret;
}

/**
 * schedule_task - schedule a function for subsequent execution in process context.
 * @task: pointer to a &tq_struct which defines the function to be scheduled.
 *
 * May be called from interrupt context.  The scheduled function is run at some
 * time in the near future by the keventd kernel thread.  If it can sleep, it
 * should be designed to do so for the minimum possible time, as it will be
 * stalling all other scheduled tasks.
 *
 * schedule_task() returns non-zero if the task was successfully scheduled.
 * If @task is already residing on a task queue then schedule_task() fails
 * to schedule your task and returns zero.
 */
int schedule_task(struct tq_struct *task)
{
	int ret;
	need_keventd(__FUNCTION__);
	ret = queue_task(task, &tq_context);
	wake_up(&context_task_wq);
	return ret;
}
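
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * embeds a tq_struct, points ->routine at a handler and hands it to
 * schedule_task(), often from an interrupt handler.  The example_* names
 * below are hypothetical.
 */
#if 0
static void example_routine(void *data)
{
	/* Runs later in keventd's process context; may sleep briefly. */
	printk(KERN_DEBUG "example_routine: data=%p\n", data);
}

static struct tq_struct example_task = {
	routine:	example_routine,
	data:		NULL,
};

static void example_interrupt_handler(void)
{
	/* Safe from interrupt context; returns zero if already queued */
	if (!schedule_task(&example_task))
		printk(KERN_DEBUG "example_task was already pending\n");
}
#endif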

static int context_thread(void *startup)
{
	struct task_struct *curtask = current;
	DECLARE_WAITQUEUE(wait, curtask);
	struct k_sigaction sa;

	daemonize();
	strcpy(curtask->comm, "keventd");
	keventd_running = 1;
	keventd_task = curtask;

	spin_lock_irq(&curtask->sigmask_lock);
	siginitsetinv(&curtask->blocked, sigmask(SIGCHLD));
	recalc_sigpending(curtask);
	spin_unlock_irq(&curtask->sigmask_lock);

	complete((struct completion *)startup);

	/* Install a handler so SIGCLD is delivered */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	/*
	 * If one of the functions on a task queue re-adds itself
	 * to the task queue we call schedule() in state TASK_RUNNING
	 */
	for (;;) {
		set_task_state(curtask, TASK_INTERRUPTIBLE);
		add_wait_queue(&context_task_wq, &wait);
		if (TQ_ACTIVE(tq_context))
			set_task_state(curtask, TASK_RUNNING);
		schedule();
		remove_wait_queue(&context_task_wq, &wait);
		run_task_queue(&tq_context);
		wake_up(&context_task_done);
		if (signal_pending(curtask)) {
			/* Reap any children left behind by the tasks we ran */
			while (waitpid(-1, (unsigned int *)0, __WALL|WNOHANG) > 0)
				;
			spin_lock_irq(&curtask->sigmask_lock);
			flush_signals(curtask);
			recalc_sigpending(curtask);
			spin_unlock_irq(&curtask->sigmask_lock);
		}
	}
}

/**
 * flush_scheduled_tasks - ensure that any scheduled tasks have run to completion.
 *
 * Forces execution of the schedule_task() queue and blocks until its completion.
 *
 * If a kernel subsystem uses schedule_task() and wishes to flush any pending
 * tasks, it should use this function.  This is typically used in driver shutdown
 * handlers.
 *
 * The caller should hold no spinlocks and should hold no semaphores which could
 * cause the scheduled tasks to block.
 */
static struct tq_struct dummy_task;

void flush_scheduled_tasks(void)
{
	int count;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * Do it twice. It's possible, albeit highly unlikely, that
	 * the caller queued a task immediately before calling us,
	 * and that the keventd thread was already past the run_task_queue()
	 * but not yet into wake_up(), so it woke us up before completing
	 * the caller's queued task or our new dummy task.
	 */
	add_wait_queue(&context_task_done, &wait);
	for (count = 0; count < 2; count++) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		/* Queue a dummy task to make sure we get kicked */
		schedule_task(&dummy_task);

		/* Wait for it to complete */
		schedule();
	}
	remove_wait_queue(&context_task_done, &wait);
}
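
/*
 * Illustrative sketch (not part of the original file): a driver teardown
 * path would typically stop queueing new tasks, then flush, checking
 * current_is_keventd() first so that code running inside keventd never
 * waits on its own queue.  example_driver_shutdown is hypothetical.
 */
#if 0
static void example_driver_shutdown(void)
{
	/* Must not hold locks which the queued tasks could also take */
	if (!current_is_keventd())
		flush_scheduled_tasks();
}
#endif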

int start_context_thread(void)
{
	static struct completion startup __initdata = COMPLETION_INITIALIZER(startup);

	kernel_thread(context_thread, &startup, CLONE_FS | CLONE_FILES);
	wait_for_completion(&startup);
	return 0;
}

EXPORT_SYMBOL(schedule_task);
EXPORT_SYMBOL(flush_scheduled_tasks);