/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <asm/unistd.h>
#include "as-layout.h"
#include "chan_user.h"
#include "kern_constants.h"
#include "kern_util.h"
#include "mem.h"
#include "os.h"
#include "process.h"
#include "proc_mm.h"
#include "ptrace_user.h"
#include "registers.h"
#include "skas.h"
#include "skas_ptrace.h"
#include "user.h"
#include "sysdep/stub.h"

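/*
 * Helper for SIGIO dispatch: a SIGIO whose pid matches our own process
 * group is (presumably) a window-size change on a console, so register
 * the descriptor with the winch IRQ code rather than treating it as
 * ordinary I/O.
 */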
int is_skas_winch(int pid, int fd, void *data)
{
	if (pid != getpgrp())
		return 0;

	register_winch_irq(-1, fd, -1, data, 0);
	return 1;
}

static int ptrace_dump_regs(int pid)
{
	unsigned long regs[MAX_REG_NR];
	int i;

	if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
		return -errno;

	printk(UM_KERN_ERR "Stub registers -\n");
	for (i = 0; i < ARRAY_SIZE(regs); i++)
		printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);

	return 0;
}

/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGVTALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)

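/*
 * Wait for the stub to finish what it was asked to do.  Benign stops
 * (STUB_SIG_MASK) are simply continued; a stop in STUB_DONE_MASK means
 * the stub is done.  Any other outcome - including the wait itself
 * failing - is unrecoverable, so dump the stub's registers and bail out
 * through fatal_sigsegv().
 */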
void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "wait_stub_done : continue failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, "
		       "errno = %d\n", -err);
	printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
	       "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
	       status);
	fatal_sigsegv();
}

extern unsigned long current_stub_stack(void);

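/*
 * Fetch the fault information for the child's last SIGSEGV.  With
 * PTRACE_FAULTINFO the host hands it over directly; otherwise the child
 * is continued with SIGSEGV so that the stub's SEGV handler runs and
 * leaves a struct faultinfo at the start of the stub stack page, which
 * is then copied out.  The child's FP registers are saved and restored
 * around the trip through the stub.
 */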
static void get_skas_faultinfo(int pid, struct faultinfo *fi)
{
	int err;

	if (ptrace_faultinfo) {
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if (err) {
			printk(UM_KERN_ERR "get_skas_faultinfo - "
			       "PTRACE_FAULTINFO failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		unsigned long fpregs[FP_SIZE];

		err = get_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "get_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
		err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
		if (err) {
			printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
			       "errno = %d\n", pid, errno);
			fatal_sigsegv();
		}
		wait_stub_done(pid);

		/*
		 * faultinfo is prepared by the stub's SEGV handler at the
		 * start of the stub stack page.  We just have to copy it.
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));

		err = put_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "put_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
	}
}

static void handle_segv(int pid, struct uml_pt_regs *regs)
{
	get_skas_faultinfo(pid, &regs->faultinfo);
	segv(regs->faultinfo, 0, 1, NULL);
}

/*
 * To use the same value of using_sysemu as the caller, ask it for that
 * value (passed in as local_using_sysemu).
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
			int local_using_sysemu)
{
	int err, status;

	if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
		fatal_sigsegv();

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);

	if (!local_using_sysemu)
	{
		err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - nullifying syscall "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - continuing to end of "
			       "syscall failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((err < 0) || !WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != SIGTRAP + 0x80)) {
			err = ptrace_dump_regs(pid);
			if (err)
				printk(UM_KERN_ERR "Failed to get registers "
				       "from process, errno = %d\n", -err);
			printk(UM_KERN_ERR "handle_trap - failed to wait at "
			       "end of syscall, errno = %d, status = %d\n",
			       errno, status);
			fatal_sigsegv();
		}
	}

	handle_syscall(regs);
}

extern int __syscall_stub_start;

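/*
 * Entry point of the clone()d child that will run userspace code.  It
 * asks to be traced, sets up its timer, maps the syscall stub code
 * (and, if a stack was passed, the stub data page) when there is no
 * /proc/mm, installs the stub SEGV handler on that page as an alternate
 * signal stack when PTRACE_FAULTINFO isn't available, and finally stops
 * itself so the tracer can take over.
 */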
static int userspace_tramp(void *stack)
{
	void *addr;
	int err;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	signal(SIGWINCH, SIG_IGN);
	err = set_interval();
	if (err) {
		printk(UM_KERN_ERR "userspace_tramp - setting timer failed, "
		       "errno = %d\n", err);
		exit(1);
	}

	if (!proc_mm) {
		/*
		 * This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		unsigned long long offset;
		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if (addr == MAP_FAILED) {
			printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
			       "errno = %d\n", STUB_CODE, errno);
			exit(1);
		}

		if (stack != NULL) {
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) STUB_DATA,
				    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if (addr == MAP_FAILED) {
				printk(UM_KERN_ERR "mapping segfault stack "
				       "at 0x%lx failed, errno = %d\n",
				       STUB_DATA, errno);
				exit(1);
			}
		}
	}
	if (!ptrace_faultinfo && (stack != NULL)) {
		struct sigaction sa;

		unsigned long v = STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = SA_ONSTACK | SA_NODEFER;
		sa.sa_handler = (void *) v;
		sa.sa_restorer = NULL;
		if (sigaction(SIGSEGV, &sa, NULL) < 0) {
			printk(UM_KERN_ERR "userspace_tramp - setting SIGSEGV "
			       "handler failed - errno = %d\n", errno);
			exit(1);
		}
	}

	kill(os_getpid(), SIGSTOP);
	return 0;
}

/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
int userspace_pid[NR_CPUS];

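/*
 * Create the process that will run userspace code: clone it with a
 * temporary stack, wait for it to report in with SIGSTOP (skipping any
 * early SIGVTALRMs), set PTRACE_O_TRACESYSGOOD so syscall stops can be
 * told apart from real SIGTRAPs, and free the temporary stack.  Returns
 * the new pid or a negative error code.
 */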
int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, flags, err;

	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : mmap failed, "
		       "errno = %d\n", errno);
		return err;
	}

	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);

	flags = CLONE_FILES;
	if (proc_mm)
		flags |= CLONE_VM;
	else
		flags |= SIGCHLD;

	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if (pid < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : clone failed, "
		       "errno = %d\n", errno);
		return err;
	}

	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if (n < 0) {
			err = -errno;
			printk(UM_KERN_ERR "start_userspace : wait failed, "
			       "errno = %d\n", errno);
			goto out_kill;
		}
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
		err = -EINVAL;
		printk(UM_KERN_ERR "start_userspace : expected SIGSTOP, got "
		       "status = %d\n", status);
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : munmap failed, "
		       "errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

 out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}

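/*
 * The main tracing loop - this is where UML runs its userspace.  Each
 * iteration loads the saved registers into the child, resumes it with
 * the ptrace operation chosen from the sysemu and single-stepping
 * settings, waits for the next stop, reads the registers back and
 * dispatches on the stop signal (page faults, syscalls, timer ticks,
 * I/O, etc).  This function does not return.
 */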
void userspace(struct uml_pt_regs *regs)
{
	struct itimerval timer;
	unsigned long long nsecs, now;
	int err, status, op, pid = userspace_pid[0];
	/* To prevent races if using_sysemu changes under us. */
	int local_using_sysemu;

	if (getitimer(ITIMER_VIRTUAL, &timer))
		printk(UM_KERN_ERR "Failed to get itimer, errno = %d\n", errno);
	nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC +
		timer.it_value.tv_usec * UM_NSEC_PER_USEC;
	nsecs += os_nsecs();

	while (1) {
		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register.  It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process.  However, PTRACE_SETREGS will
		 * fail.  In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
			fatal_sigsegv();

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu,
					     singlestepping(NULL));

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "userspace - ptrace continue "
			       "failed, op = %d, errno = %d\n", op, errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0) {
			printk(UM_KERN_ERR "userspace - wait failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		regs->is_user = 1;
		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);
			switch (sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO ||
				    !ptrace_faultinfo) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV, regs);
				}
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, regs);
				break;
			case SIGVTALRM:
				now = os_nsecs();
				if (now < nsecs)
					break;
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				nsecs = timer.it_value.tv_sec *
					UM_NSEC_PER_SEC +
					timer.it_value.tv_usec *
					UM_NSEC_PER_USEC;
				nsecs += os_nsecs();
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				break;
			default:
				printk(UM_KERN_ERR "userspace - child stopped "
				       "with signal %d\n", sig);
				fatal_sigsegv();
			}
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}

static unsigned long thread_regs[MAX_REG_NR];

static int __init init_thread_regs(void)
{
	get_safe_registers(thread_regs);
	/* Set parent's instruction pointer to start of clone-stub */
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
				(unsigned long) stub_clone_handler -
				(unsigned long) &__syscall_stub_start;
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	return 0;
}

__initcall(init_thread_regs);

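/*
 * Create a new address space in SKAS0 mode.  The existing stub is given
 * the fd/offset of the new stub stack page (plus the timer settings)
 * through its data page and its registers are pointed at the clone
 * stub; when continued, it clones a child which maps its own stub pages
 * and reports back through the new, shared stack page.  Returns the
 * child's pid or a negative error code.
 */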
int copy_context_skas0(unsigned long new_stack, int pid)
{
	struct timeval tv = { .tv_sec = 0, .tv_usec = UM_USEC_PER_SEC / UM_HZ };
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/*
	 * Prepare the offset and fd of the child's stack as the argument
	 * for the parent's and child's mmap2 calls.
	 */
	*data = ((struct stub_data) { .offset	= MMAP_OFFSET(new_offset),
				      .fd	= new_fd,
				      .timer    = ((struct itimerval)
					           { .it_value = tv,
						     .it_interval = tv }) });

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS "
		       "failed, pid = %d, errno = %d\n", pid, -err);
		return err;
	}

	/* Set a well-known return code for detection of child write failure */
	child_data->err = 12345678;

	/*
	 * Wait until the parent has finished its work: read the child's
	 * pid from the parent's stack and check for a bad result.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err) {
		err = -errno;
		printk(UM_KERN_ERR "Failed to continue new process, pid = %d, "
		       "errno = %d\n", pid, errno);
		return err;
	}

	wait_stub_done(pid);

	pid = data->err;
	if (pid < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports "
		       "error %d\n", -pid);
		return pid;
	}

	/*
	 * Wait until the child has finished too: read the child's result
	 * from the child's stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->err != STUB_DATA) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports "
		       "error %ld\n", child_data->err);
		err = child_data->err;
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

 out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}

/*
 * This is used only if stub pages are needed while proc_mm is
 * available.  Opening /proc/mm creates a new mm_context which lacks
 * the stub pages, so we map them in using the /proc/mm fd.
 */
int map_stub_pages(int fd, unsigned long code, unsigned long data,
		   unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;
	unsigned long long code_offset;
	int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
				   &code_offset);

	mmop = ((struct proc_mm_op) { .op        = MM_MMAP,
				      .u         =
				      { .mmap    =
					{ .addr    = code,
					  .len     = UM_KERN_PAGE_SIZE,
					  .prot    = PROT_EXEC,
					  .flags   = MAP_FIXED | MAP_PRIVATE,
					  .fd      = code_fd,
					  .offset  = code_offset
	} } });
	CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
	if (n != sizeof(mmop)) {
		n = errno;
		printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
		       "offset = %llx\n", code, code_fd,
		       (unsigned long long) code_offset);
		printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for code "
		       "failed, err = %d\n", n);
		return -n;
	}

	if (stack) {
		unsigned long long map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
		mmop = ((struct proc_mm_op)
				{ .op        = MM_MMAP,
				  .u         =
				  { .mmap    =
				    { .addr    = data,
				      .len     = UM_KERN_PAGE_SIZE,
				      .prot    = PROT_READ | PROT_WRITE,
				      .flags   = MAP_FIXED | MAP_SHARED,
				      .fd      = map_fd,
				      .offset  = map_offset
		} } });
		CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
		if (n != sizeof(mmop)) {
			n = errno;
			printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for "
			       "data failed, err = %d\n", n);
			return -n;
		}
	}

	return 0;
}

void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
	(*buf)[0].JB_IP = (unsigned long) handler;
	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
		sizeof(void *);
}

#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3

void switch_threads(jmp_buf *me, jmp_buf *you)
{
	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}

static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;

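/*
 * Set up initial_jmpbuf, which the callback, halt and reboot paths jump
 * back to, then switch to the idle thread via switch_buf.  The return
 * value tells the caller whether UML is halting (0) or rebooting (1).
 */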
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH, (__sighandler_t) sig_handler,
		    SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGVTALRM, -1);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		(*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad sigsetjmp return in "
		       "start_idle_thread - %d\n", n);
		fatal_sigsegv();
	}
	longjmp(*switch_buf, 1);
}

void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}

void halt_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}

void reboot_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}

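/*
 * Switch the traced userspace process to a new address space.  With
 * /proc/mm this is a PTRACE_SWITCH_MM on the existing process; without
 * it, each mm has its own process, so just switch which pid is traced.
 */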
void __switch_mm(struct mm_id *mm_idp)
{
	int err;

	/* FIXME: need cpu pid in __switch_mm */
	if (proc_mm) {
		err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
			     mm_idp->u.mm_fd);
		if (err) {
			printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}
	}
	else userspace_pid[0] = mm_idp->u.pid;
}