/*
	kmod, the new module loader (replaces kerneld)
	Kirk Petersen

	Reorganized not to be a daemon by Adam Richter, with guidance
	from Greg Zornetzer.

	Modified to avoid chroot and file sharing problems.
	Mikael Pettersson

	Limit the number of concurrent kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000
*/

#define __KERNEL_SYSCALLS__

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/namespace.h>
#include <linux/completion.h>

#include <asm/uaccess.h>

extern int max_threads;

static inline void
use_init_fs_context(void)
{
	struct fs_struct *our_fs, *init_fs;
	struct dentry *root, *pwd;
	struct vfsmount *rootmnt, *pwdmnt;
	struct namespace *our_ns, *init_ns;

	/*
	 * Make modprobe's fs context be a copy of init's.
	 *
	 * We cannot use the user's fs context, because it
	 * may have a different root than init.
	 * Since init was created with CLONE_FS, we can grab
	 * its fs context from "init_task".
	 *
	 * The fs context has to be a copy. If it is shared
	 * with init, then any chdir() call in modprobe will
	 * also affect init and the other threads sharing
	 * init_task's fs context.
	 *
	 * We created the exec_modprobe thread without CLONE_FS,
	 * so we can update the fields in our fs context freely.
	 */

	init_fs = init_task.fs;
	init_ns = init_task.namespace;
	get_namespace(init_ns);
	our_ns = current->namespace;
	current->namespace = init_ns;
	put_namespace(our_ns);
	read_lock(&init_fs->lock);
	rootmnt = mntget(init_fs->rootmnt);
	root = dget(init_fs->root);
	pwdmnt = mntget(init_fs->pwdmnt);
	pwd = dget(init_fs->pwd);
	read_unlock(&init_fs->lock);

	/* FIXME - unsafe ->fs access */
	our_fs = current->fs;
	our_fs->umask = init_fs->umask;
	set_fs_root(our_fs, rootmnt, root);
	set_fs_pwd(our_fs, pwdmnt, pwd);
	write_lock(&our_fs->lock);
	if (our_fs->altroot) {
		struct vfsmount *mnt = our_fs->altrootmnt;
		struct dentry *dentry = our_fs->altroot;
		our_fs->altrootmnt = NULL;
		our_fs->altroot = NULL;
		write_unlock(&our_fs->lock);
		dput(dentry);
		mntput(mnt);
	} else
		write_unlock(&our_fs->lock);
	dput(root);
	mntput(rootmnt);
	dput(pwd);
	mntput(pwdmnt);
}

int exec_usermodehelper(char *program_path, char *argv[], char *envp[])
{
	int i;
	struct task_struct *curtask = current;

	/* Run the helper in session 1 / process group 1, like init,
	   rather than in the caller's session. */
	curtask->session = 1;
	curtask->pgrp = 1;

	use_init_fs_context();

	/* Prevent parent user process from sending signals to child.
	   Otherwise, if the modprobe program does not exist, it might
	   be possible to get a user-defined signal handler to execute
	   as the super user right after the execve fails if you time
	   the signal just right.
	*/
	spin_lock_irq(&curtask->sigmask_lock);
	sigemptyset(&curtask->blocked);
	flush_signals(curtask);
	flush_signal_handlers(curtask);
	recalc_sigpending(curtask);
	spin_unlock_irq(&curtask->sigmask_lock);

	/* Close all file descriptors inherited from the requesting process. */
	for (i = 0; i < curtask->files->max_fds; i++ ) {
		if (curtask->files->fd[i]) close(i);
	}

	switch_uid(INIT_USER);

	/* Give kmod all effective privileges.. */
	curtask->euid = curtask->uid = curtask->suid = curtask->fsuid = 0;
	curtask->egid = curtask->gid = curtask->sgid = curtask->fsgid = 0;

	memcpy(&curtask->rlim, &init_task.rlim, sizeof(struct rlimit)*RLIM_NLIMITS);

	curtask->ngroups = 0;

	cap_set_full(curtask->cap_effective);

	/* Allow execve args to be in kernel space. */
	set_fs(KERNEL_DS);

	/* Go, go, go... */
	if (execve(program_path, argv, envp) < 0)
		return -errno;
	return 0;
}

#ifdef CONFIG_KMOD

/*
	modprobe_path is set via /proc/sys.
*/
char modprobe_path[256] = "/sbin/modprobe";

static int exec_modprobe(void * module_name)
{
	static char * envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
	char *argv[] = { modprobe_path, "-s", "-k", "--", (char*)module_name, NULL };
	int ret;

	ret = exec_usermodehelper(modprobe_path, argv, envp);
	if (ret) {
		printk(KERN_ERR
		       "kmod: failed to exec %s -s -k %s, errno = %d\n",
		       modprobe_path, (char*) module_name, errno);
	}
	return ret;
}

/**
 * request_module - try to load a kernel module
 * @module_name: Name of module
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean that the module did not then
 * unload and exit on an error of its own. Callers must check that the
 * service they requested is now available, not blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int request_module(const char * module_name)
{
	pid_t pid;
	int waitpid_result;
	sigset_t tmpsig;
	int i;
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	/* Don't allow request_module() before the root fs is mounted!  */
	if ( ! current->fs->root ) {
		printk(KERN_ERR "request_module[%s]: Root fs not mounted\n",
			module_name);
		return -EPERM;
	}

	/* If modprobe needs a service that is in a module, we get a recursive
	 * loop.  Limit the number of running kmod threads to max_threads/2 or
	 * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
	 * would be to run the parents of this process, counting how many times
	 * kmod was invoked.  That would mean accessing the internals of the
	 * process tables to get the command line; proc_pid_cmdline is static
	 * and it is not worth changing the proc code just to handle this case.
	 * KAO.
	 */
	i = max_threads/2;
	if (i > MAX_KMOD_CONCURRENT)
		i = MAX_KMOD_CONCURRENT;
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > i) {
		if (kmod_loop_msg++ < 5)
			printk(KERN_ERR
			       "kmod: runaway modprobe loop assumed and stopped\n");
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	pid = kernel_thread(exec_modprobe, (void*) module_name, 0);
	if (pid < 0) {
		printk(KERN_ERR "request_module[%s]: fork failed, errno %d\n", module_name, -pid);
		atomic_dec(&kmod_concurrent);
		return pid;
	}

	/* Block everything but SIGKILL/SIGSTOP */
	spin_lock_irq(&current->sigmask_lock);
	tmpsig = current->blocked;
	siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	waitpid_result = waitpid(pid, NULL, __WCLONE);
	atomic_dec(&kmod_concurrent);

	/* Allow signals again.. */
	spin_lock_irq(&current->sigmask_lock);
	current->blocked = tmpsig;
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	if (waitpid_result != pid) {
		printk(KERN_ERR "request_module[%s]: waitpid(%d,...) failed, errno %d\n",
		       module_name, pid, -waitpid_result);
	}
	return 0;
}
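
/*
 * Illustrative usage sketch (not part of this file): a driver that needs
 * a module before it can provide a service asks for it by name and then
 * re-checks that the service actually appeared, as the comment above
 * request_module() requires.  The "char-major-%d" alias form is the usual
 * convention; the service_is_registered() check below is hypothetical and
 * stands in for whatever re-check the caller really needs.
 *
 *	char name[32];
 *
 *	sprintf(name, "char-major-%d", major);
 *	request_module(name);
 *	if (!service_is_registered(major))
 *		return -ENODEV;
 */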
#endif /* CONFIG_KMOD */


#ifdef CONFIG_HOTPLUG
/*
	hotplug path is set via /proc/sys
	invoked by hotplug-aware bus drivers,
	with exec_usermodehelper and some thread-spawner

	argv [0] = hotplug_path;
	argv [1] = "usb", "scsi", "pci", "network", etc;
	... plus optional type-specific parameters
	argv [n] = 0;

	envp [*] = HOME, PATH; optional type-specific parameters

	a hotplug bus should invoke this for device add/remove
	events.  the command is expected to load drivers when
	necessary, and may perform additional system setup.
*/
char hotplug_path[256] = "/sbin/hotplug";

EXPORT_SYMBOL(hotplug_path);
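
/*
 * Illustrative sketch only: one way a hotplug-aware bus driver might
 * build the argv/envp described above and hand them to
 * call_usermodehelper().  The "usb" agent name is just an example; each
 * bus adds its own type-specific parameters to envp.
 *
 *	char *argv[3], *envp[3];
 *
 *	argv[0] = hotplug_path;
 *	argv[1] = "usb";
 *	argv[2] = NULL;
 *
 *	envp[0] = "HOME=/";
 *	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
 *	envp[2] = NULL;
 *
 *	call_usermodehelper(argv[0], argv, envp);
 */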

#endif /* CONFIG_HOTPLUG */


struct subprocess_info {
	struct completion *complete;
	char *path;
	char **argv;
	char **envp;
	pid_t retval;
};

/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	int retval;

	retval = -EPERM;
	if (current->fs->root)
		retval = exec_usermodehelper(sub_info->path, sub_info->argv, sub_info->envp);

	/* Exec failed? */
	sub_info->retval = (pid_t)retval;
	do_exit(0);
}

/*
 * This is run by keventd.
 */
static void __call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;

	/*
	 * CLONE_VFORK: wait until the usermode helper has execve'd successfully
	 * We need the data structures to stay around until that is done.
	 */
	pid = kernel_thread(____call_usermodehelper, sub_info, CLONE_VFORK | SIGCHLD);
	if (pid < 0)
		sub_info->retval = pid;
	complete(sub_info->complete);
}

/**
 * call_usermodehelper - start a usermode application
 * @path: pathname for the application
 * @argv: null-terminated argument list
 * @envp: null-terminated environment list
 *
 * Runs a user-space application.  The application is started
 * asynchronously: it runs as a child of keventd, with full root
 * capabilities, and keventd silently reaps it when it exits.
 *
 * Must be called from process context.  Returns zero on success, else a
 * negative error code.
 */
int call_usermodehelper(char *path, char **argv, char **envp)
{
	DECLARE_COMPLETION(work);
	struct subprocess_info sub_info = {
		complete:	&work,
		path:		path,
		argv:		argv,
		envp:		envp,
		retval:		0,
	};
	struct tq_struct tqs = {
		routine:	__call_usermodehelper,
		data:		&sub_info,
	};

	if (path[0] == '\0')
		goto out;

	if (current_is_keventd()) {
		/* We can't wait on keventd! */
		__call_usermodehelper(&sub_info);
	} else {
		schedule_task(&tqs);
		wait_for_completion(&work);
	}
out:
	return sub_info.retval;
}
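
/*
 * Illustrative caller sketch (the agent path and argument are made up):
 * call_usermodehelper() returns once the helper has been started, or has
 * failed to start; it does not wait for the helper to finish, so the
 * caller only checks whether the launch itself worked.
 *
 *	static char *argv[] = { "/sbin/example-agent", "add", NULL };
 *	static char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
 *	int ret;
 *
 *	ret = call_usermodehelper(argv[0], argv, envp);
 *	if (ret < 0)
 *		printk(KERN_WARNING "example-agent could not be started\n");
 */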

/*
 * This is for the serialisation of device probe() functions
 * against device open() functions
 */
static DECLARE_MUTEX(dev_probe_sem);

void dev_probe_lock(void)
{
	down(&dev_probe_sem);
}

void dev_probe_unlock(void)
{
	up(&dev_probe_sem);
}
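
/*
 * Illustrative pairing only (the probe routine below is hypothetical):
 * a driver takes the lock around its probe path so that a concurrent
 * open() of a half-initialised device is serialised against it.
 *
 *	dev_probe_lock();
 *	err = example_bus_probe(dev);
 *	dev_probe_unlock();
 */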

EXPORT_SYMBOL(exec_usermodehelper);
EXPORT_SYMBOL(call_usermodehelper);

#ifdef CONFIG_KMOD
EXPORT_SYMBOL(request_module);
#endif