/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/kernel/process.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 *
 * Started from SH3/4 version:
 *   Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 *
 *   In turn started from i386 version:
 *     Copyright (C) 1995  Linus Torvalds
 *
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

/* Temporary flags/tests. All to be removed/undefined. BEGIN */
#define IDLE_TRACE
#define VM_SHOW_TABLES
#define VM_TEST_FAULT
#define VM_TEST_RTLBMISS
#define VM_TEST_WTLBMISS

#undef VM_SHOW_TABLES
#undef IDLE_TRACE
/* Temporary flags/tests. All to be removed/undefined. END */

#define __KERNEL_SYSCALLS__
#include <stdarg.h>

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/unistd.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>		/* includes also <asm/registers.h> */
#include <asm/mmu_context.h>
#include <asm/elf.h>
#include <asm/page.h>

#include <linux/irq.h>

struct task_struct *last_task_used_math = NULL;

#ifdef IDLE_TRACE
#ifdef VM_SHOW_TABLES
/* For testing */
static void print_PTE(long base)
{
	int i, skip=0;
	long long x, y, *p = (long long *) base;

	for (i=0; i< 512; i++, p++){
		if (*p == 0) {
			if (!skip) {
				skip++;
				printk("(0s) ");
			}
		} else {
			skip=0;
			x = (*p) >> 32;
			y = (*p) & 0xffffffff;
			printk("%08Lx%08Lx ", x, y);
			if (!((i+1)&0x3)) printk("\n");
		}
	}
}

/* For testing */
static void print_DIR(long base)
{
	int i, skip=0;
	long *p = (long *) base;

	for (i=0; i< 512; i++, p++){
		if (*p == 0) {
			if (!skip) {
				skip++;
				printk("(0s) ");
			}
		} else {
			skip=0;
			printk("%08lx ", *p);
			if (!((i+1)&0x7)) printk("\n");
		}
	}
}

/* For testing */
static void print_vmalloc_first_tables(void)
{

#define PRESENT	0x800	/* Bit 11 */

	/*
	 * Do it really dirty by looking at raw addresses,
	 * raw offsets, no types. If we used pgtable/pgalloc
	 * macros/definitions we could hide potential bugs.
	 *
	 * Note that pointers are 32-bit for CDC.
	 */
	long pgdt, pmdt, ptet;

	pgdt = (long) &swapper_pg_dir;
	printk("-->PGD (0x%08lx):\n", pgdt);
	print_DIR(pgdt);
	printk("\n");

	/* VMALLOC pool is mapped at 0xc0000000, second (pointer) entry in PGD */
	pgdt += 4;
	pmdt = (long) (* (long *) pgdt);
	if (!(pmdt & PRESENT)) {
		printk("No PMD\n");
		return;
	} else pmdt &= 0xfffff000;

	printk("-->PMD (0x%08lx):\n", pmdt);
	print_DIR(pmdt);
	printk("\n");

	/* Get the pmdt displacement for 0xc0000000 */
	pmdt += 2048;

	/* just look at first two address ranges ... */
	/* ... 0xc0000000 ... */
	ptet = (long) (* (long *) pmdt);
	if (!(ptet & PRESENT)) {
		printk("No PTE0\n");
		return;
	} else ptet &= 0xfffff000;

	printk("-->PTE0 (0x%08lx):\n", ptet);
	print_PTE(ptet);
	printk("\n");

	/* ... 0xc0001000 ... */
	ptet += 4;
	if (!(ptet & PRESENT)) {
		printk("No PTE1\n");
		return;
	} else ptet &= 0xfffff000;
	printk("-->PTE1 (0x%08lx):\n", ptet);
	print_PTE(ptet);
	printk("\n");
}
#else
#define print_vmalloc_first_tables()
#endif	/* VM_SHOW_TABLES */

static void test_VM(void)
{
	void *a, *b, *c;

#ifdef VM_SHOW_TABLES
	printk("Initial PGD/PMD/PTE\n");
#endif
	print_vmalloc_first_tables();

	printk("Allocating 2 bytes\n");
	a = vmalloc(2);
	print_vmalloc_first_tables();

	printk("Allocating 4100 bytes\n");
	b = vmalloc(4100);
	print_vmalloc_first_tables();

	printk("Allocating 20234 bytes\n");
	c = vmalloc(20234);
	print_vmalloc_first_tables();

#ifdef VM_TEST_FAULT
	/* Here you may want to fault ! */

#ifdef VM_TEST_RTLBMISS
	printk("Ready to fault upon read.\n");
	if (* (char *) a) {
		printk("RTLBMISSed on area a !\n");
	}
	printk("RTLBMISSed on area a !\n");
#endif

#ifdef VM_TEST_WTLBMISS
	printk("Ready to fault upon write.\n");
	*((char *) b) = 'L';
	printk("WTLBMISSed on area b !\n");
#endif

#endif	/* VM_TEST_FAULT */

	printk("Deallocating the 4100 byte chunk\n");
	vfree(b);
	print_vmalloc_first_tables();

	printk("Deallocating the 2 byte chunk\n");
	vfree(a);
	print_vmalloc_first_tables();

	printk("Deallocating the last chunk\n");
	vfree(c);
	print_vmalloc_first_tables();
}

extern unsigned long volatile jiffies;
int once = 0;
unsigned long old_jiffies;
int pid = -1, pgid = -1;

void idle_trace(void)
{

	_syscall0(int, getpid)
	_syscall1(int, getpgid, int, pid)

	if (!once) {
		/* VM allocation/deallocation simple test */
		test_VM();
		pid = getpid();

		printk("Got all through to Idle !!\n");
		printk("I'm now going to loop forever ...\n");
		printk("Any ! below is a timer tick.\n");
		printk("Any . below is a getpgid system call from pid = %d.\n", pid);


		old_jiffies = jiffies;
		once++;
	}

	if (old_jiffies != jiffies) {
		old_jiffies = jiffies - old_jiffies;
		switch (old_jiffies) {
		case 1:
			printk("!");
			break;
		case 2:
			printk("!!");
			break;
		case 3:
			printk("!!!");
			break;
		case 4:
			printk("!!!!");
			break;
		default:
			printk("(%d!)", (int) old_jiffies);
		}
		old_jiffies = jiffies;
	}
	pgid = getpgid(pid);
	printk(".");
}
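
/*
 * Note on the trace above: with __KERNEL_SYSCALLS__ defined at the top of
 * this file, the _syscall0()/_syscall1() macros expand (via the per-arch
 * <asm/unistd.h> definitions) into small static wrappers that issue the real
 * system call trap, so the getpid()/getpgid() calls in idle_trace() exercise
 * the normal syscall entry path from inside the idle loop.  This is
 * debug-only scaffolding, like the rest of the IDLE_TRACE code.
 */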
#else
#define idle_trace()	do { } while (0)
#endif	/* IDLE_TRACE */

static int hlt_counter = 1;

#define HARD_IDLE_TIMEOUT (HZ / 3)

void disable_hlt(void)
{
	hlt_counter++;
}

void enable_hlt(void)
{
	hlt_counter--;
}

static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}

static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}

__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
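
/*
 * Since hlt_counter defaults to 1, the idle sleep is off unless "hlt" is
 * given on the kernel command line ("nohlt" forces it back off).  An
 * illustrative set of boot arguments might look like:
 *
 *	console=ttySC0,38400 root=/dev/ram0 hlt
 *
 * where the console/root values are only placeholders for whatever the
 * board actually uses.
 */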

static inline void hlt(void)
{
	if (hlt_counter)
		return;

	/*
	 * FIXME: Is there any reason why we can't just do a "sleep"
	 * instead of this crap?
	 */
	__asm__ __volatile__ (
		".int %0\n\t"
		: /* no outputs */
		: "g" (le32_to_cpu(0x6ff7fff0))
		: "memory"
	);
}
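
/*
 * The ".int" above emits a hand-encoded SHmedia instruction, presumably the
 * low-power sleep that the FIXME refers to, written as a raw word because
 * the assembler in use had no mnemonic for it.  This is an assumption based
 * on the surrounding comment rather than on the instruction set manual.
 */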

/*
 * The idle loop on a uniprocessor SH..
 */
void cpu_idle(void *unused)
{
	/* endless idle loop with no priority at all */
	init_idle();
	current->nice = 20;
	current->counter = -100;

	while (1) {
		while (!current->need_resched) {
			if (hlt_counter)
				continue;
			__sti();
			idle_trace();
			hlt();
		}
		schedule();
		check_pgt_cache();
	}
}

void machine_restart(char * __unused)
{
	extern void phys_stext(void);

	phys_stext();
}

void machine_halt(void)
{
	for (;;);
}

void machine_power_off(void)
{
	enter_deep_standby();
}

void show_regs(struct pt_regs * regs)
{
	unsigned long long ah, al, bh, bl, ch, cl;

	printk("\n");

	ah = (regs->pc) >> 32;
	al = (regs->pc) & 0xffffffff;
	bh = (regs->regs[18]) >> 32;
	bl = (regs->regs[18]) & 0xffffffff;
	ch = (regs->regs[15]) >> 32;
	cl = (regs->regs[15]) & 0xffffffff;
	printk("PC  : %08Lx%08Lx LINK: %08Lx%08Lx SP  : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->sr) >> 32;
	al = (regs->sr) & 0xffffffff;
	asm volatile ("getcon   " __c13 ", %0" : "=r" (bh));
	asm volatile ("getcon   " __c13 ", %0" : "=r" (bl));
	bh = (bh) >> 32;
	bl = (bl) & 0xffffffff;
	asm volatile ("getcon   " __c17 ", %0" : "=r" (ch));
	asm volatile ("getcon   " __c17 ", %0" : "=r" (cl));
	ch = (ch) >> 32;
	cl = (cl) & 0xffffffff;
	printk("SR  : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[0]) >> 32;
	al = (regs->regs[0]) & 0xffffffff;
	bh = (regs->regs[1]) >> 32;
	bl = (regs->regs[1]) & 0xffffffff;
	ch = (regs->regs[2]) >> 32;
	cl = (regs->regs[2]) & 0xffffffff;
	printk("R0  : %08Lx%08Lx R1  : %08Lx%08Lx R2  : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[3]) >> 32;
	al = (regs->regs[3]) & 0xffffffff;
	bh = (regs->regs[4]) >> 32;
	bl = (regs->regs[4]) & 0xffffffff;
	ch = (regs->regs[5]) >> 32;
	cl = (regs->regs[5]) & 0xffffffff;
	printk("R3  : %08Lx%08Lx R4  : %08Lx%08Lx R5  : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[6]) >> 32;
	al = (regs->regs[6]) & 0xffffffff;
	bh = (regs->regs[7]) >> 32;
	bl = (regs->regs[7]) & 0xffffffff;
	ch = (regs->regs[8]) >> 32;
	cl = (regs->regs[8]) & 0xffffffff;
	printk("R6  : %08Lx%08Lx R7  : %08Lx%08Lx R8  : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[9]) >> 32;
	al = (regs->regs[9]) & 0xffffffff;
	bh = (regs->regs[10]) >> 32;
	bl = (regs->regs[10]) & 0xffffffff;
	ch = (regs->regs[11]) >> 32;
	cl = (regs->regs[11]) & 0xffffffff;
	printk("R9  : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[12]) >> 32;
	al = (regs->regs[12]) & 0xffffffff;
	bh = (regs->regs[13]) >> 32;
	bl = (regs->regs[13]) & 0xffffffff;
	ch = (regs->regs[14]) >> 32;
	cl = (regs->regs[14]) & 0xffffffff;
	printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[16]) >> 32;
	al = (regs->regs[16]) & 0xffffffff;
	bh = (regs->regs[17]) >> 32;
	bl = (regs->regs[17]) & 0xffffffff;
	ch = (regs->regs[19]) >> 32;
	cl = (regs->regs[19]) & 0xffffffff;
	printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[20]) >> 32;
	al = (regs->regs[20]) & 0xffffffff;
	bh = (regs->regs[21]) >> 32;
	bl = (regs->regs[21]) & 0xffffffff;
	ch = (regs->regs[22]) >> 32;
	cl = (regs->regs[22]) & 0xffffffff;
	printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[23]) >> 32;
	al = (regs->regs[23]) & 0xffffffff;
	bh = (regs->regs[24]) >> 32;
	bl = (regs->regs[24]) & 0xffffffff;
	ch = (regs->regs[25]) >> 32;
	cl = (regs->regs[25]) & 0xffffffff;
	printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[26]) >> 32;
	al = (regs->regs[26]) & 0xffffffff;
	bh = (regs->regs[27]) >> 32;
	bl = (regs->regs[27]) & 0xffffffff;
	ch = (regs->regs[28]) >> 32;
	cl = (regs->regs[28]) & 0xffffffff;
	printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[29]) >> 32;
	al = (regs->regs[29]) & 0xffffffff;
	bh = (regs->regs[30]) >> 32;
	bl = (regs->regs[30]) & 0xffffffff;
	ch = (regs->regs[31]) >> 32;
	cl = (regs->regs[31]) & 0xffffffff;
	printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[32]) >> 32;
	al = (regs->regs[32]) & 0xffffffff;
	bh = (regs->regs[33]) >> 32;
	bl = (regs->regs[33]) & 0xffffffff;
	ch = (regs->regs[34]) >> 32;
	cl = (regs->regs[34]) & 0xffffffff;
	printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[35]) >> 32;
	al = (regs->regs[35]) & 0xffffffff;
	bh = (regs->regs[36]) >> 32;
	bl = (regs->regs[36]) & 0xffffffff;
	ch = (regs->regs[37]) >> 32;
	cl = (regs->regs[37]) & 0xffffffff;
	printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[38]) >> 32;
	al = (regs->regs[38]) & 0xffffffff;
	bh = (regs->regs[39]) >> 32;
	bl = (regs->regs[39]) & 0xffffffff;
	ch = (regs->regs[40]) >> 32;
	cl = (regs->regs[40]) & 0xffffffff;
	printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[41]) >> 32;
	al = (regs->regs[41]) & 0xffffffff;
	bh = (regs->regs[42]) >> 32;
	bl = (regs->regs[42]) & 0xffffffff;
	ch = (regs->regs[43]) >> 32;
	cl = (regs->regs[43]) & 0xffffffff;
	printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[44]) >> 32;
	al = (regs->regs[44]) & 0xffffffff;
	bh = (regs->regs[45]) >> 32;
	bl = (regs->regs[45]) & 0xffffffff;
	ch = (regs->regs[46]) >> 32;
	cl = (regs->regs[46]) & 0xffffffff;
	printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[47]) >> 32;
	al = (regs->regs[47]) & 0xffffffff;
	bh = (regs->regs[48]) >> 32;
	bl = (regs->regs[48]) & 0xffffffff;
	ch = (regs->regs[49]) >> 32;
	cl = (regs->regs[49]) & 0xffffffff;
	printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[50]) >> 32;
	al = (regs->regs[50]) & 0xffffffff;
	bh = (regs->regs[51]) >> 32;
	bl = (regs->regs[51]) & 0xffffffff;
	ch = (regs->regs[52]) >> 32;
	cl = (regs->regs[52]) & 0xffffffff;
	printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[53]) >> 32;
	al = (regs->regs[53]) & 0xffffffff;
	bh = (regs->regs[54]) >> 32;
	bl = (regs->regs[54]) & 0xffffffff;
	ch = (regs->regs[55]) >> 32;
	cl = (regs->regs[55]) & 0xffffffff;
	printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[56]) >> 32;
	al = (regs->regs[56]) & 0xffffffff;
	bh = (regs->regs[57]) >> 32;
	bl = (regs->regs[57]) & 0xffffffff;
	ch = (regs->regs[58]) >> 32;
	cl = (regs->regs[58]) & 0xffffffff;
	printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[59]) >> 32;
	al = (regs->regs[59]) & 0xffffffff;
	bh = (regs->regs[60]) >> 32;
	bl = (regs->regs[60]) & 0xffffffff;
	ch = (regs->regs[61]) >> 32;
	cl = (regs->regs[61]) & 0xffffffff;
	printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[62]) >> 32;
	al = (regs->regs[62]) & 0xffffffff;
	bh = (regs->tregs[0]) >> 32;
	bl = (regs->tregs[0]) & 0xffffffff;
	ch = (regs->tregs[1]) >> 32;
	cl = (regs->tregs[1]) & 0xffffffff;
	printk("R62 : %08Lx%08Lx T0  : %08Lx%08Lx T1  : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->tregs[2]) >> 32;
	al = (regs->tregs[2]) & 0xffffffff;
	bh = (regs->tregs[3]) >> 32;
	bl = (regs->tregs[3]) & 0xffffffff;
	ch = (regs->tregs[4]) >> 32;
	cl = (regs->tregs[4]) & 0xffffffff;
	printk("T2  : %08Lx%08Lx T3  : %08Lx%08Lx T4  : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->tregs[5]) >> 32;
	al = (regs->tregs[5]) & 0xffffffff;
	bh = (regs->tregs[6]) >> 32;
	bl = (regs->tregs[6]) & 0xffffffff;
	ch = (regs->tregs[7]) >> 32;
	cl = (regs->tregs[7]) & 0xffffffff;
	printk("T5  : %08Lx%08Lx T6  : %08Lx%08Lx T7  : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	/*
	 * If we're in kernel mode, dump the stack too..
	 */
	if (!user_mode(regs)) {
		extern void show_task(unsigned long *sp);
		unsigned long sp = regs->regs[15] & 0xffffffff;

		show_task((unsigned long *)sp);
	}
}

struct task_struct * alloc_task_struct(void)
{
	/* Get task descriptor pages */
	return (struct task_struct *)
		__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
}

void free_task_struct(struct task_struct *p)
{
	free_pages((unsigned long) p, get_order(THREAD_SIZE));
}

/*
 * Create a kernel thread
 */

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
 * who haven't done an "execve()") should use this: it will work within
 * a system call from a "real" process, but the process memory space will
 * not be free'd until both the parent and the child have exited.
 */
int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	/* A bit less processor dependent than older sh ... */

	unsigned int reply;

static __inline__ _syscall2(int,clone,unsigned long,flags,unsigned long,newsp)
static __inline__ _syscall1(int,exit,int,ret)

	reply = clone(flags | CLONE_VM, 0);
	if (!reply) {
		/* Child */
		reply = exit(fn(arg));
	}

	return reply;
}
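
/*
 * Illustrative (hypothetical) caller: the generic kernel_thread() interface
 * that ends up here is typically used by drivers roughly like this --
 *
 *	static int my_daemon(void *arg)
 *	{
 *		daemonize();
 *		for (;;) {
 *			-- wait for work, do it, break when asked to stop --
 *		}
 *		return 0;
 *	}
 *
 *	kernel_thread(my_daemon, NULL, CLONE_FS | CLONE_FILES);
 *
 * The child runs through the clone()/exit() sequence above: it executes
 * fn(arg) and terminates via exit() with fn's return value as the exit code.
 */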

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* See arch/sparc/kernel/process.c for the precedent for doing this -- RPC.

	   The SH-5 FPU save/restore approach relies on last_task_used_math
	   pointing to a live task_struct.  When another task tries to use the
	   FPU for the 1st time, the FPUDIS trap handling (see
	   arch/sh64/kernel/fpu.c) will save the existing FPU state to the
	   FP regs field within last_task_used_math before re-loading the new
	   task's FPU state (or initialising it if the FPU has been used
	   before).  So if last_task_used_math is stale, and its page has already been
	   re-allocated for another use, the consequences are rather grim. Unless we
	   null it here, there is no other path through which it would get safely
	   nulled. */

#ifndef CONFIG_NOFPU_SUPPORT
	if (last_task_used_math == current) {
		last_task_used_math = NULL;
	}
#endif
}

void flush_thread(void)
{

	/* As far as I can tell, this function isn't actually called from anywhere.
	   So why does it have a non-null body for most architectures?? -- RPC */
	/* Look closer, this is used in fs/exec.c by flush_old_exec() which is
	   used by binfmt_elf and friends to remove leftover traces of the
	   previously running executable. -- PFM */
#ifndef CONFIG_NOFPU_SUPPORT
	if (last_task_used_math == current) {
		last_task_used_math = NULL;
	}
#endif

	/* if we are a kernel thread, about to change to user thread,
	 * update kreg
	 */
	if (current->thread.kregs == &fake_swapper_regs) {
		current->thread.kregs =
			((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
	}
}

void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}

/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
#ifndef CONFIG_NOFPU_SUPPORT
	int fpvalid;
	struct task_struct *tsk = current;

	fpvalid = tsk->used_math;
	if (fpvalid) {
		if (current == last_task_used_math) {
			grab_fpu();
			fpsave(&tsk->thread.fpu.hard);
			release_fpu();
			last_task_used_math = 0;
			regs->sr |= SR_FD;
		}

		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}

	return fpvalid;
#else
	return 0; /* Task didn't use the fpu at all. */
#endif
}

asmlinkage void ret_from_fork(void);

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long long se;			/* Sign extension */
#ifndef CONFIG_NOFPU_SUPPORT
	if (last_task_used_math == current) {
		grab_fpu();
		fpsave(&current->thread.fpu.hard);
		release_fpu();
		last_task_used_math = NULL;
		regs->sr |= SR_FD;
	}
#endif
	childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) p)) - 1;
	*childregs = *regs;

	if (user_mode(regs)) {
		childregs->regs[15] = usp;
		p->thread.kregs = childregs;
	} else {
		childregs->regs[15] = (unsigned long)p+THREAD_SIZE;
		p->thread.kregs = &fake_swapper_regs;
	}

	childregs->regs[9] = 0; /* Set return value for child */
	childregs->sr |= SR_FD; /* Invalidate FPU flag */

	p->thread.sp = (unsigned long) childregs;
	p->thread.pc = (unsigned long) ret_from_fork;

	/*
	 * Sign extend the edited stack.
	 * Note that thread.sp and thread.pc will stay
	 * 32-bit wide and context switch must take care
	 * of NEFF sign extension.
	 */

	se = childregs->regs[15];
	se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
	childregs->regs[15] = se;

	return 0;
}
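
/*
 * Worked example of the sign extension above, assuming NEFF_SIGN is bit 31
 * and NEFF_MASK covers bits 63..32: a new user stack pointer of 0x80001000
 * is stored back as 0xffffffff80001000, while 0x7ffff000 is left untouched,
 * so the saved SP obeys the sign-extended effective-address form expected
 * for 32-bit (NEFF = 32) addresses.
 */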

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	dump->magic = CMAGIC;
	dump->start_code = current->mm->start_code;
	dump->start_data  = current->mm->start_data;
	dump->start_stack = regs->regs[15] & ~(PAGE_SIZE - 1);
	dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT;
	dump->u_dsize = (current->mm->brk + (PAGE_SIZE-1) - dump->start_data) >> PAGE_SHIFT;
	dump->u_ssize = (current->mm->start_stack - dump->start_stack +
			 PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* Debug registers will come here. */

	dump->regs = *regs;

	dump->u_fpvalid = dump_fpu(regs, &dump->fpu);
}

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 */
struct task_struct * __switch_to(struct task_struct *prev, struct task_struct *next)
{
	/*
	 * Restore the kernel mode register
	 *   	KCR0 =  __c17
	 */
	asm volatile("putcon	%0, " __c17 "\n"
		     : /* no output */
		     :"r" (next));
	return prev;

}
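
/*
 * Keeping 'next' in KCR0 presumably lets the exception entry code locate the
 * task_struct (and hence the kernel stack) of whatever task was running when
 * a trap is taken; the register file save/restore for the context switch
 * itself is presumably handled by the accompanying assembly switch_to code
 * rather than here.
 */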

asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
			unsigned long r4, unsigned long r5,
			unsigned long r6, unsigned long r7,
			struct pt_regs *pregs)
{
	return do_fork(SIGCHLD, pregs->regs[15], pregs, 0);
}

asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs *pregs)
{
	if (!newsp)
		newsp = pregs->regs[15];
	return do_fork(clone_flags, newsp, pregs, 0);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
			 unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs *pregs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char *ufilename, char **uargv,
			  char **uenvp, unsigned long r5,
			  unsigned long r6, unsigned long r7,
			  struct pt_regs *pregs)
{
	int error;
	char *filename;

	lock_kernel();
	filename = getname(ufilename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename, uargv, uenvp, pregs);
	if (error == 0)
		current->ptrace &= ~PT_DTRACE;
	putname(filename);
out:
	unlock_kernel();
	return error;
}

/*
 * These bracket the sleeping functions..
 */
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
extern void interruptible_sleep_on(wait_queue_head_t *q);

#define first_sched	((unsigned long) scheduling_functions_start_here)
#define mid_sched	((unsigned long) interruptible_sleep_on)
#define last_sched	((unsigned long) scheduling_functions_end_here)

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long schedule_frame;
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	pc = thread_saved_pc(&p->thread);

	if (pc >= first_sched && pc < last_sched) {

		schedule_frame = (long) p->thread.sp;

		/* Should we unwind schedule_timeout() ? */
		if (pc < mid_sched)
			/* according to disasm:
			 *     48 bytes in case of RH toolchain
			 */
			schedule_frame += 48;

		/*
		 * Unwind schedule(). According to disasm:
		 *    72 bytes in case of RH toolchain
		 * plus 304 bytes of switch_to additional frame.
		 */
		schedule_frame += 72 + 304;

#ifdef CS_SAVE_ALL
		schedule_frame += 256;
#endif
		/*
		 * schedule_frame now according to SLEEP_ON_VAR.
		 * Bad thing is that we have no trace of the waiting
		 * address (the classical WCHAN). SLEEP_ON_VAR should
		 * have saved q. From the linked list only we can't get
		 * the object and first parameter is not saved on stack
		 * by the ABI. The best we can tell is who called the
		 * *sleep_on* by returning LINK, which is saved at
		 * offset 64 on all flavours.
		 */
		return (unsigned long)((unsigned long *)schedule_frame)[16];
	}
	return pc;
}
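
/*
 * To make the unwind arithmetic above concrete: with 4-byte longs (the
 * 32-bit pointer/CDC configuration this file assumes), index [16] is byte
 * offset 64, i.e. the saved LINK slot mentioned in the comment.  So for a
 * task blocked in schedule() the value reported is
 *
 *	*(unsigned long *)(p->thread.sp + 72 + 304 + 64)
 *
 * with a further 48 bytes added first if the task went to sleep through
 * schedule_timeout(), and another 256 when CS_SAVE_ALL is defined.
 */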

/* Provide a /proc/asids file that lists out the
   ASIDs currently associated with the processes.  (If the DM.PC register is
   examined through the debug link, this shows ASID + PC.  To make use of this,
   the PID->ASID relationship needs to be known.  This is primarily for
   debugging.)
   */

#if defined(CONFIG_SH64_PROC_ASIDS)
#include <linux/init.h>
#include <linux/proc_fs.h>

static int
asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
{
	int len = 0;
	struct task_struct *p;
	read_lock(&tasklist_lock);
	for_each_task(p) {
		int pid = p->pid;
		struct mm_struct *mm;
		if (!pid) continue;
		mm = p->mm;
		if (mm) {
			unsigned long asid, context;
			context = mm->context;
			asid = (context & 0xff);
			len += sprintf(buf+len, "%5d : %02x\n", pid, asid);
		} else {
			len += sprintf(buf+len, "%5d : (none)\n", pid);
		}
	}
	read_unlock(&tasklist_lock);
	*eof = 1;
	return len;
}
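
/*
 * The read handler above produces one "pid : asid" line per task, e.g.
 * (values purely illustrative):
 *
 *	    1 : 02
 *	   42 : (none)
 *	  113 : 3f
 *
 * where "(none)" marks kernel threads that have no mm, matching the
 * "%5d : %02x" format used in the sprintf() calls.
 */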

static int __init register_proc_asids(void)
{
	create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
	return 0;
}

__initcall(register_proc_asids);
#endif