/* $Id: process.c,v 1.125.2.1 2001/12/18 19:40:17 davem Exp $
 * arch/sparc64/kernel/process.c
 *
 * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#define __KERNEL_SYSCALLS__
#include <stdarg.h>

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/config.h>
#include <linux/reboot.h>
#include <linux/delay.h>

#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>

/* #define VERBOSE_SHOWREGS */

#ifndef CONFIG_SMP

/*
 * the idle loop on a Sparc... ;)
 */
int cpu_idle(void)
{
	if (current->pid != 0)
		return -EPERM;

	/* endless idle loop with no priority at all */
	current->nice = 20;
	current->counter = -100;
	init_idle();

	for (;;) {
		/* If current->need_resched is zero we should really
		 * set up for a system wakeup event and execute a shutdown
		 * instruction.
		 *
		 * But this requires writing back the contents of the
		 * L2 cache etc. so implement this later. -DaveM
		 */
		while (!current->need_resched)
			barrier();

		schedule();
		check_pgt_cache();
	}
	return 0;
}

#else

/*
 * the idle loop on a UltraMultiPenguin...
 */
#define idle_me_harder() (cpu_data[current->processor].idle_volume += 1)
#define unidle_me() (cpu_data[current->processor].idle_volume = 0)
int cpu_idle(void)
{
	current->nice = 20;
	current->counter = -100;
	init_idle();

	while(1) {
		if (current->need_resched != 0) {
			unidle_me();
			schedule();
			check_pgt_cache();
		}
		idle_me_harder();

		/* The store ordering is so that IRQ handlers on
		 * other cpus see our increasing idleness for the buddy
		 * redistribution algorithm. -DaveM
		 */
		membar_safe("#StoreStore | #StoreLoad");
	}
}

#endif

extern char reboot_command [];

#ifdef CONFIG_SUN_CONSOLE
extern void (*prom_palette)(int);
extern int serial_console;
#endif
extern void (*prom_keyboard)(void);

void machine_halt(void)
{
	sti();
	mdelay(8);
	cli();
#ifdef CONFIG_SUN_CONSOLE
	if (!serial_console && prom_palette)
		prom_palette (1);
#endif
	if (prom_keyboard)
		prom_keyboard();
	prom_halt();
	panic("Halt failed!");
}

void machine_alt_power_off(void)
{
	sti();
	mdelay(8);
	cli();
#ifdef CONFIG_SUN_CONSOLE
	if (!serial_console && prom_palette)
		prom_palette(1);
#endif
	if (prom_keyboard)
		prom_keyboard();
	prom_halt_power_off();
	panic("Power-off failed!");
}

void machine_restart(char * cmd)
{
	char *p;

	sti();
	mdelay(8);
	cli();

	p = strchr (reboot_command, '\n');
	if (p) *p = 0;
#ifdef CONFIG_SUN_CONSOLE
	if (!serial_console && prom_palette)
		prom_palette (1);
#endif
	if (prom_keyboard)
		prom_keyboard();
	if (cmd)
		prom_reboot(cmd);
	if (*reboot_command)
		prom_reboot(reboot_command);
	prom_reboot("");
	panic("Reboot failed!");
}

static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 *rw;
	struct reg_window32 r_w;
	mm_segment_t old_fs;

	__asm__ __volatile__ ("flushw");
	rw = (struct reg_window32 *)((long)(unsigned)regs->u_regs[14]);
	old_fs = get_fs();
	set_fs (USER_DS);
	if (copy_from_user (&r_w, rw, sizeof(r_w))) {
		set_fs (old_fs);
		return;
	}
	rw = &r_w;
	set_fs (old_fs);
	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
	       rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
	       rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
}

static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window *rw;
	struct reg_window r_w;
	mm_segment_t old_fs;

	if ((regs->tstate & TSTATE_PRIV) || !(current->thread.flags & SPARC_FLAG_32BIT)) {
		__asm__ __volatile__ ("flushw");
		rw = (struct reg_window *)(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			old_fs = get_fs();
			set_fs (USER_DS);
			if (copy_from_user (&r_w, rw, sizeof(r_w))) {
				set_fs (old_fs);
				return;
			}
			rw = &r_w;
			set_fs (old_fs);
		}
	} else {
		show_regwindow32(regs);
		return;
	}
	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
}

void show_stackframe(struct sparc_stackf *sf)
{
	unsigned long size;
	unsigned long *stk;
	int i;

	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n"
	       "l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n"
	       "i4: %016lx i5: %016lx fp: %016lx ret_pc: %016lx\n",
	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
	       sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
	printk("sp: %016lx x0: %016lx x1: %016lx x2: %016lx\n"
	       "x3: %016lx x4: %016lx x5: %016lx xx: %016lx\n",
	       (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
	       sf->xxargs[0]);
	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
	size -= STACKFRAME_SZ;
	stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
	i = 0;
	do {
		printk("s%d: %016lx\n", i++, *stk++);
	} while ((size -= sizeof(unsigned long)));
}

void show_stackframe32(struct sparc_stackf32 *sf)
{
	unsigned long size;
	unsigned *stk;
	int i;

	printk("l0: %08x l1: %08x l2: %08x l3: %08x\n",
	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3]);
	printk("l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x\n",
	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3]);
	printk("i4: %08x i5: %08x fp: %08x ret_pc: %08x\n",
	       sf->ins[4], sf->ins[5], sf->fp, sf->callers_pc);
	printk("sp: %08x x0: %08x x1: %08x x2: %08x\n"
	       "x3: %08x x4: %08x x5: %08x xx: %08x\n",
	       sf->structptr, sf->xargs[0], sf->xargs[1],
	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
	       sf->xxargs[0]);
	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
	size -= STACKFRAME32_SZ;
	stk = (unsigned *)((unsigned long)sf + STACKFRAME32_SZ);
	i = 0;
	do {
		printk("s%d: %08x\n", i++, *stk++);
	} while ((size -= sizeof(unsigned)));
}

#ifdef CONFIG_SMP
static spinlock_t regdump_lock = SPIN_LOCK_UNLOCKED;
#endif

void __show_regs(struct pt_regs * regs)
{
#ifdef CONFIG_SMP
	unsigned long flags;

	/* Protect against xcall ipis which might lead to livelock on the lock */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (flags)
			     : "i" (PSTATE_IE));
	spin_lock(&regdump_lock);
	printk("CPU[%d]: local_irq_count[%u] irqs_running[%d]\n",
	       smp_processor_id(),
	       local_irq_count(smp_processor_id()),
	       irqs_running());
#endif
	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	show_regwindow(regs);
#ifdef CONFIG_SMP
	spin_unlock(&regdump_lock);
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (flags));
#endif
}

#ifdef VERBOSE_SHOWREGS
static void idump_from_user (unsigned int *pc)
{
	int i;
	int code;

	if((((unsigned long) pc) & 3))
		return;

	pc -= 3;
	for(i = -3; i < 6; i++) {
		get_user(code, pc);
		printk("%c%08x%c",i?' ':'<',code,i?' ':'>');
		pc++;
	}
	printk("\n");
}
#endif

void show_regs(struct pt_regs *regs)
{
#ifdef VERBOSE_SHOWREGS
	extern long etrap, etraptl1;
#endif
	__show_regs(regs);
#ifdef CONFIG_SMP
	{
		extern void smp_report_regs(void);

		smp_report_regs();
	}
#endif

#ifdef VERBOSE_SHOWREGS
	if (regs->tpc >= &etrap && regs->tpc < &etraptl1 &&
	    regs->u_regs[14] >= (long)current - PAGE_SIZE &&
	    regs->u_regs[14] < (long)current + 6 * PAGE_SIZE) {
		printk ("*********parent**********\n");
		__show_regs((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF));
		idump_from_user(((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF))->tpc);
		printk ("*********endpar**********\n");
	}
#endif
}

void show_regs32(struct pt_regs32 *regs)
{
	printk("PSR: %08x PC: %08x NPC: %08x Y: %08x %s\n", regs->psr,
	       regs->pc, regs->npc, regs->y, print_tainted());
	printk("g0: %08x g1: %08x g2: %08x g3: %08x ",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %08x g5: %08x g6: %08x g7: %08x\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %08x o1: %08x o2: %08x o3: %08x ",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %08x o5: %08x sp: %08x ret_pc: %08x\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
}

void show_thread(struct thread_struct *thread)
{
	int i;

#if 0
	printk("kregs: 0x%016lx\n", (unsigned long)thread->kregs);
	show_regs(thread->kregs);
#endif
	printk("ksp: 0x%016lx\n", thread->ksp);

	if (thread->w_saved) {
		for (i = 0; i < NSWINS; i++) {
			if (!thread->rwbuf_stkptrs[i])
				continue;
			printk("reg_window[%d]:\n", i);
			printk("stack ptr: 0x%016lx\n", thread->rwbuf_stkptrs[i]);
		}
		printk("w_saved: 0x%04x\n", thread->w_saved);
	}

	printk("flags: 0x%08x\n", thread->flags);
	printk("current_ds: 0x%x\n", thread->current_ds.seg);
}

/* Free current thread data structures etc. */
void exit_thread(void)
{
	struct thread_struct *t = &current->thread;

	if (t->utraps) {
		if (t->utraps[0] < 2)
			kfree (t->utraps);
		else
			t->utraps[0]--;
	}

	/* Turn off performance counters if on. */
	if (t->flags & SPARC_FLAG_PERFCTR) {
		t->user_cntd0 = t->user_cntd1 = NULL;
		t->pcr_reg = 0;
		t->flags &= ~(SPARC_FLAG_PERFCTR);
		write_pcr(0);
	}
}

void flush_thread(void)
{
	struct thread_struct *t = &current->thread;

	if (t->flags & SPARC_FLAG_ABI_PENDING)
		t->flags ^= (SPARC_FLAG_ABI_PENDING |
			     SPARC_FLAG_32BIT);
	if (current->mm) {
		unsigned long pgd_cache = 0UL;

		if (t->flags & SPARC_FLAG_32BIT) {
			struct mm_struct *mm = current->mm;
			pgd_t *pgd0 = &mm->pgd[0];

			if (pgd_none(*pgd0)) {
				pmd_t *page = pmd_alloc_one_fast(NULL, 0);
				if (!page)
					page = pmd_alloc_one(NULL, 0);
				pgd_set(pgd0, page);
			}
			pgd_cache = ((unsigned long) pgd_val(*pgd0)) << 11UL;
		}
		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (pgd_cache),
				       "r" (TSB_REG),
				       "i" (ASI_DMMU));
	}
	t->w_saved = 0;

	/* Turn off performance counters if on. */
	if (t->flags & SPARC_FLAG_PERFCTR) {
		t->user_cntd0 = t->user_cntd1 = NULL;
		t->pcr_reg = 0;
		t->flags &= ~(SPARC_FLAG_PERFCTR);
		write_pcr(0);
	}

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;

	if (t->current_ds.seg != ASI_AIUS)
		set_fs(USER_DS);

	/* Init new signal delivery disposition. */
	t->flags &= ~SPARC_FLAG_NEWSIGNALS;
}

/* It's a bit more tricky when 64-bit tasks are involved... */
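/* clone_stackframe() gives the child its own copy of the parent's topmost
 * user stack frame: it reads the saved frame pointer out of ins[6] of the
 * reg_window at the parent's %sp (biased by STACK_BIAS for 64-bit tasks),
 * copies everything between %sp and that frame pointer onto the child's new
 * stack, and then rewrites the copied ins[6] so the child's window chain
 * points into its own stack.  The return value is the child's new %sp (with
 * STACK_BIAS removed again for 64-bit tasks), or 0 if a user access faults.
 */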
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	unsigned long fp, distance, rval;

	if (!(current->thread.flags & SPARC_FLAG_32BIT)) {
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		__get_user(fp, &(((struct reg_window *)psp)->ins[6]));
		fp += STACK_BIAS;
	} else
		__get_user(fp, &(((struct reg_window32 *)psp)->ins[6]));

	/* Now 8-byte align the stack as this is mandatory in the
	 * Sparc ABI due to how register windows work. This hides
	 * the restriction from thread libraries etc. -DaveM
	 */
	csp &= ~7UL;

	distance = fp - psp;
	rval = (csp - distance);
	if (copy_in_user((void *)rval, (void *)psp, distance))
		rval = 0;
	else if (current->thread.flags & SPARC_FLAG_32BIT) {
		if (put_user(((u32)csp), &(((struct reg_window32 *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window *)rval)->ins[6])))
			rval = 0;
		else
			rval = rval - STACK_BIAS;
	}

	return rval;
}

/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
				       struct thread_struct *t)
{
	int i;

	for (i = first_win; i < last_win; i++) {
		t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
		memcpy(&t->reg_window[i], &t->reg_window[i+1],
		       sizeof(struct reg_window));
	}
}

void synchronize_user_stack(void)
{
	struct thread_struct *t = &current->thread;
	unsigned long window;

	flush_user_windows();
	if ((window = t->w_saved) != 0) {
		int winsize = sizeof(struct reg_window);
		int bias = 0;

		if (t->flags & SPARC_FLAG_32BIT)
			winsize = sizeof(struct reg_window32);
		else
			bias = STACK_BIAS;

		window -= 1;
		do {
			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
			struct reg_window *rwin = &t->reg_window[window];

			if (!copy_to_user((char *)sp, rwin, winsize)) {
				shift_window_buffer(window, t->w_saved - 1, t);
				t->w_saved--;
			}
		} while (window--);
	}
}

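/* fault_in_user_windows() is the stricter counterpart of
 * synchronize_user_stack() above: both spill the register windows buffered
 * in thread.reg_window[] back to the user stack, but where
 * synchronize_user_stack() quietly leaves behind any window it cannot
 * write, this routine treats an unwritable stack as fatal and kills the
 * task with SIGILL.
 */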
void fault_in_user_windows(void)
{
	struct thread_struct *t = &current->thread;
	unsigned long window;
	int winsize = sizeof(struct reg_window);
	int bias = 0;

	if (t->flags & SPARC_FLAG_32BIT)
		winsize = sizeof(struct reg_window32);
	else
		bias = STACK_BIAS;

	flush_user_windows();
	window = t->w_saved;

	if (window != 0) {
		window -= 1;
		do {
			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
			struct reg_window *rwin = &t->reg_window[window];

			if (copy_to_user((char *)sp, rwin, winsize))
				goto barf;
		} while (window--);
	}
	t->w_saved = 0;
	return;

barf:
	t->w_saved = window + 1;
	do_exit(SIGILL);
}

/* Copy a Sparc thread. The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent --> %o0 == child's pid, %o1 == 0
 * Child  --> %o0 == parent's pid, %o1 == 1
 */
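/* Illustrative only (a hypothetical user-level stub, not code from this
 * file): with the convention above, the two returns of fork() can be told
 * apart by testing %o1 after the system call, e.g. (v9 code shown):
 *
 *	brnz,pn	%o1, 99f	! %o1 == 1 only in the child
 *	 nop
 *	! parent path: %o0 holds the child's pid
 * 99:	! child path: %o0 holds the parent's pid
 */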
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct thread_struct *t = &p->thread;
	char *child_trap_frame;

#ifdef CONFIG_DEBUG_SPINLOCK
	t->smp_lock_count = 0;
	t->smp_lock_pc = 0;
#endif

	/* Calculate offset to stack_frame & pt_regs */
	child_trap_frame = ((char *)p) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
	memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->flags |= SPARC_FLAG_NEWCHILD;
	t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
	t->cwp = (regs->tstate + 1) & TSTATE_CWP;
	t->fpsaved[0] = 0;

	if (regs->tstate & TSTATE_PRIV) {
		/* Special case, if we are spawning a kernel thread from
		 * a userspace task (via KMOD, NFS, or similar) we must
		 * disable performance counters in the child because the
		 * address space and protection realm are changing.
		 */
		if (t->flags & SPARC_FLAG_PERFCTR) {
			t->user_cntd0 = t->user_cntd1 = NULL;
			t->pcr_reg = 0;
			t->flags &= ~(SPARC_FLAG_PERFCTR);
		}
		t->kregs->u_regs[UREG_FP] = p->thread.ksp;
		t->current_ds = KERNEL_DS;
		flush_register_windows();
		memcpy((void *)(t->ksp + STACK_BIAS),
		       (void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
		       sizeof(struct sparc_stackf));
		t->kregs->u_regs[UREG_G6] = (unsigned long) p;
	} else {
		if (t->flags & SPARC_FLAG_32BIT) {
			sp &= 0x00000000ffffffffUL;
			regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
		}
		t->kregs->u_regs[UREG_FP] = sp;
		t->current_ds = USER_DS;
		if (sp != regs->u_regs[UREG_FP]) {
			unsigned long csp;

			csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
			if (!csp)
				return -EFAULT;
			t->kregs->u_regs[UREG_FP] = csp;
		}
		if (t->utraps)
			t->utraps[0]++;
	}

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	return 0;
}

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
 * who haven't done an "execve()") should use this: it will work within
 * a system call from a "real" process, but the process memory space will
 * not be freed until both the parent and the child have exited.
 */
pid_t arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval;

	/* If the parent runs before fn(arg) is called by the child,
	 * the input registers of this function can be clobbered.
	 * So we stash 'fn' and 'arg' into global registers which
	 * will not be modified by the parent.
	 */
	__asm__ __volatile("mov %4, %%g2\n\t"	/* Save FN into global */
			   "mov %5, %%g3\n\t"	/* Save ARG into global */
			   "mov %1, %%g1\n\t"	/* Clone syscall nr. */
			   "mov %2, %%o0\n\t"	/* Clone flags. */
			   "mov 0, %%o1\n\t"	/* usp arg == 0 */
			   "t 0x6d\n\t"		/* Linux/Sparc clone(). */
			   "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
			   " mov %%o0, %0\n\t"
			   "jmpl %%g2, %%o7\n\t"   /* Call the function. */
			   " mov %%g3, %%o0\n\t"   /* Set arg in delay. */
			   "mov %3, %%g1\n\t"
			   "t 0x6d\n\t"		/* Linux/Sparc exit(). */
			   /* Notreached by child. */
			   "1:" :
			   "=r" (retval) :
			   "i" (__NR_clone), "r" (flags | CLONE_VM),
			   "i" (__NR_exit), "r" (fn), "r" (arg) :
			   "g1", "g2", "g3", "o0", "o1", "memory", "cc");
	return retval;
}
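/* Illustrative only (hypothetical caller, not code from this file): a
 * driver could start a kernel daemon with something like
 *
 *	pid = arch_kernel_thread(my_daemon, NULL, CLONE_FS | CLONE_FILES);
 *
 * where my_daemon() is the thread function.  CLONE_VM is OR'ed in by
 * arch_kernel_thread() itself, so the new thread always shares the
 * caller's memory map rather than copying it.
 */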

/*
 * fill in the user structure for a core dump.
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
#if 1
	/* Only should be used for SunOS and ancient a.out
	 * SparcLinux binaries... Fixme some day when bored.
	 * But for now at least plug the security hole :-)
	 */
	memset(dump, 0, sizeof(struct user));
#else
	unsigned long first_stack_page;
	dump->magic = SUNOS_CORE_MAGIC;
	dump->len = sizeof(struct user);
	dump->regs.psr = regs->psr;
	dump->regs.pc = regs->pc;
	dump->regs.npc = regs->npc;
	dump->regs.y = regs->y;
	memcpy(&dump->regs.regs[0], &regs->u_regs[1], (sizeof(unsigned long) * 15));
	dump->u_tsize = (((unsigned long) current->mm->end_code) -
			 ((unsigned long) current->mm->start_code)) & ~(PAGE_SIZE - 1);
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1)));
	dump->u_dsize -= dump->u_tsize;
	dump->u_dsize &= ~(PAGE_SIZE - 1);
	first_stack_page = (regs->u_regs[UREG_FP] & ~(PAGE_SIZE - 1));
	dump->u_ssize = (TASK_SIZE - first_stack_page) & ~(PAGE_SIZE - 1);
	memcpy(&dump->fpu.fpstatus.fregs.regs[0], &current->thread.float_regs[0], (sizeof(unsigned long) * 32));
	dump->fpu.fpstatus.fsr = current->thread.fsr;
	dump->fpu.fpstatus.flags = dump->fpu.fpstatus.extra = 0;
#endif
}

typedef struct {
	union {
		unsigned int pr_regs[32];
		unsigned long pr_dregs[16];
	} pr_fr;
	unsigned int __unused;
	unsigned int pr_fsr;
	unsigned char pr_qcnt;
	unsigned char pr_q_entrysize;
	unsigned char pr_en;
	unsigned int pr_q[64];
} elf_fpregset_t32;

/*
 * fill in the fpu structure for a core dump.
 */
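/* The FPRS bits tested below have their SPARC V9 meanings: FPRS_DL (dirty
 * lower) indicates %f0-%f31 hold live state, FPRS_DU (dirty upper) covers
 * %f32-%f62, and FPRS_FEF is the FPU-enable bit, so the saved %fsr/%gsr
 * values are only meaningful when it is set.
 */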
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
	unsigned long *kfpregs = (unsigned long *)(((char *)current) + AOFF_task_fpregs);
	unsigned long fprs = current->thread.fpsaved[0];

	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
		elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

		if (fprs & FPRS_DL)
			memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs32->pr_fr.pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		fpregs32->pr_qcnt = 0;
		fpregs32->pr_q_entrysize = 8;
		memset(&fpregs32->pr_q[0], 0,
		       (sizeof(unsigned int) * 64));
		if (fprs & FPRS_FEF) {
			fpregs32->pr_fsr = (unsigned int) current->thread.xfsr[0];
			fpregs32->pr_en = 1;
		} else {
			fpregs32->pr_fsr = 0;
			fpregs32->pr_en = 0;
		}
	} else {
		if(fprs & FPRS_DL)
			memcpy(&fpregs->pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		if(fprs & FPRS_DU)
			memcpy(&fpregs->pr_regs[16], kfpregs+16,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[16], 0,
			       sizeof(unsigned int) * 32);
		if(fprs & FPRS_FEF) {
			fpregs->pr_fsr = current->thread.xfsr[0];
			fpregs->pr_gsr = current->thread.gsr[0];
		} else {
			fpregs->pr_fsr = fpregs->pr_gsr = 0;
		}
		fpregs->pr_fprs = fprs;
	}
	return 1;
}

/*
 * sparc_execve() executes a new program after the asm stub has set
 * things up for us. This should basically do what I want it to.
 */
asmlinkage int sparc_execve(struct pt_regs *regs)
{
	int error, base = 0;
	char *filename;

	/* User register window flush is done by entry.S */

	/* Check for indirect call. */
	if (regs->u_regs[UREG_G1] == 0)
		base = 1;

	filename = getname((char *)regs->u_regs[base + UREG_I0]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, (char **) regs->u_regs[base + UREG_I1],
			  (char **) regs->u_regs[base + UREG_I2], regs);
	putname(filename);
	if (!error) {
		fprs_write(0);
		current->thread.xfsr[0] = 0;
		current->thread.fpsaved[0] = 0;
		regs->tstate &= ~TSTATE_PEF;
	}
out:
	return error;
}