/*
 *  linux/arch/arm/kernel/process.c
 *
 *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
 *  Original Copyright (C) 1995  Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <stdarg.h>

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/leds.h>
#include <asm/uaccess.h>

extern const char *processor_modes[];
extern void setup_mm_for_reboot(char mode);

static volatile int hlt_counter;

#include <asm/arch/system.h>

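/*
 * disable_hlt()/enable_hlt() calls nest: arch_idle() is skipped from
 * the idle loop for as long as hlt_counter is non-zero.
 */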
void disable_hlt(void)
{
	hlt_counter++;
}

void enable_hlt(void)
{
	hlt_counter--;
}

static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}

static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}

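/* Command-line overrides: booting with "nohlt" keeps the CPU out of arch_idle(). */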
__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);

/*
 * Power-management hooks.  Nothing sets these at present; when
 * non-NULL they override the default idle and power-off behaviour.
 */
void (*pm_idle)(void);
void (*pm_power_off)(void);

/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
void default_idle(void)
{
	local_irq_disable();
	if (!current->need_resched && !hlt_counter)
		arch_idle();
	local_irq_enable();
}

/*
 * The idle thread.  We try to conserve power while keeping
 * overall latency low.
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	init_idle();
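	/*
	 * Lowest scheduling priority: the idle task must never be
	 * chosen over a runnable thread.
	 */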
	current->nice = 20;
	current->counter = -100;

	while (1) {
		void (*idle)(void) = pm_idle;
		if (!idle)
			idle = default_idle;
		leds_event(led_idle_start);
		while (!current->need_resched)
			idle();
		leds_event(led_idle_end);
		schedule();
#ifndef CONFIG_NO_PGT_CACHE
		check_pgt_cache();
#endif
	}
}

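/*
 * Default reboot mode is 'h'.  "reboot=" on the command line replaces
 * it; the character is handed straight to arch_reset() below.
 */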
static char reboot_mode = 'h';

int __init reboot_setup(char *str)
{
	reboot_mode = str[0];
	return 1;
}

__setup("reboot=", reboot_setup);

void machine_halt(void)
{
	leds_event(led_halted);
}

void machine_power_off(void)
{
	leds_event(led_halted);
	if (pm_power_off)
		pm_power_off();
}

void machine_restart(char * __unused)
{
	/*
	 * Clean and disable cache, and turn off interrupts
	 */
	cpu_proc_fin();

	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot(reboot_mode);

	/*
	 * Now call the architecture specific reboot code.
	 */
	arch_reset(reboot_mode);

	/*
	 * Whoops - the architecture was unable to reboot.
	 * Tell the user!
	 */
	mdelay(1000);
	printk("Reboot failed -- System halted\n");
	while (1);
}

void show_regs(struct pt_regs * regs)
{
	unsigned long flags;

	flags = condition_codes(regs);

	printk("pc : [<%08lx>]    lr : [<%08lx>]    %s\n"
	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
		instruction_pointer(regs),
		regs->ARM_lr, print_tainted(), regs->ARM_sp,
		regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);
	printk("Flags: %c%c%c%c",
		flags & CC_N_BIT ? 'N' : 'n',
		flags & CC_Z_BIT ? 'Z' : 'z',
		flags & CC_C_BIT ? 'C' : 'c',
		flags & CC_V_BIT ? 'V' : 'v');
	printk("  IRQs o%s  FIQs o%s  Mode %s%s  Segment %s\n",
		interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		thumb_mode(regs) ? " (T)" : "",
		get_fs() == get_ds() ? "kernel" : "user");
#if defined(CONFIG_CPU_32)
	{
		unsigned int ctrl, transbase, dac;
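		/*
		 * Read CP15 c1 (control), c2 (translation table base)
		 * and c3 (domain access control).
		 */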
		__asm__ (
		"	mrc p15, 0, %0, c1, c0\n"
		"	mrc p15, 0, %1, c2, c0\n"
		"	mrc p15, 0, %2, c3, c0\n"
		: "=r" (ctrl), "=r" (transbase), "=r" (dac));
		printk("Control: %04X  Table: %08X  DAC: %08X\n",
			ctrl, transbase, dac);
	}
#endif
}

void show_fpregs(struct user_fp *regs)
{
	int i;

	for (i = 0; i < 8; i++) {
		unsigned long *p;
		char type;

		p = (unsigned long *)(regs->fpregs + i);

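		/* Register tag: 'f' single, 'd' double, 'e' extended precision. */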
		switch (regs->ftype[i]) {
			case 1: type = 'f'; break;
			case 2: type = 'd'; break;
			case 3: type = 'e'; break;
			default: type = '?'; break;
		}
		if (regs->init_flag)
			type = '?';

		printk("  f%d(%c): %08lx %08lx %08lx%c",
			i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' ');
	}

	printk("FPSR: %08lx FPCR: %08lx\n",
		(unsigned long)regs->fpsr,
		(unsigned long)regs->fpcr);
}

/*
 * Task structure and kernel stack allocation.
 */
static struct task_struct *task_struct_head;
static unsigned int nr_task_struct;

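/*
 * On 32-bit CPUs, keep up to EXTRA_TASK_STRUCT recently freed
 * task_struct (and kernel stack) allocations on a free list, so
 * fork/exit don't always go back to the low-level allocator.
 */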
#ifdef CONFIG_CPU_32
#define EXTRA_TASK_STRUCT	4
#else
#define EXTRA_TASK_STRUCT	0
#endif

struct task_struct *alloc_task_struct(void)
{
	struct task_struct *tsk;

	if (EXTRA_TASK_STRUCT)
		tsk = task_struct_head;
	else
		tsk = NULL;

	if (tsk) {
		task_struct_head = tsk->next_task;
		nr_task_struct -= 1;
	} else
		tsk = ll_alloc_task_struct();

#ifdef CONFIG_SYSRQ
	/*
	 * The stack must be cleared if you want SYSRQ-T to
	 * give sensible stack usage information
	 */
	if (tsk) {
		char *p = (char *)tsk;
		memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
	}
#endif
	return tsk;
}

void __free_task_struct(struct task_struct *p)
{
	if (EXTRA_TASK_STRUCT && nr_task_struct < EXTRA_TASK_STRUCT) {
		p->next_task = task_struct_head;
		task_struct_head = p;
		nr_task_struct += 1;
	} else
		ll_free_task_struct(p);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
}

static void default_fp_init(union fp_state *fp)
{
	memset(fp, 0, sizeof(union fp_state));
}

void (*fp_init)(union fp_state *) = default_fp_init;

void flush_thread(void)
{
	struct task_struct *tsk = current;

	tsk->flags &= ~PF_USEDFPU;
	tsk->used_math = 0;

	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
	fp_init(&tsk->thread.fpstate);
}

void release_thread(struct task_struct *dead_task)
{
}

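/*
 * ret_from_fork is the assembly entry point through which every new
 * thread first returns; the asm() gives the declaration the exact
 * symbol name used by the entry code.
 */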
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

int
copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
	    unsigned long unused, struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs *childregs;
	struct context_save_struct * save;

	atomic_set(&p->thread.refcount, 1);

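	/*
	 * Place the child's pt_regs just below the top of its 8K
	 * task_struct/stack allocation (the top 8 bytes are reserved),
	 * with the context save area immediately beneath it.
	 */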
	childregs = ((struct pt_regs *)((unsigned long)p + 8192 - 8)) - 1;
	*childregs = *regs;
	childregs->ARM_r0 = 0;
	childregs->ARM_sp = esp;

	save = ((struct context_save_struct *)(childregs)) - 1;
	*save = INIT_CSS;
	save->pc |= (unsigned long)ret_from_fork;

	p->thread.save = save;

	return 0;
}

/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu(struct pt_regs *regs, struct user_fp *fp)
{
	int used_math = current->used_math;

	if (used_math)
		memcpy(fp, &current->thread.fpstate.soft, sizeof(*fp));

	return used_math;
}

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	struct task_struct *tsk = current;

	dump->magic = CMAGIC;
	dump->start_code = tsk->mm->start_code;
	dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);

	dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
	dump->u_ssize = 0;

	dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
	dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
	dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
	dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
	dump->u_debugreg[4] = tsk->thread.debug.nsaved;

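	/* Only record a stack size when the stack lives below 64MB. */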
	if (dump->start_stack < 0x04000000)
		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;

	dump->regs = *regs;
	dump->u_fpvalid = dump_fpu(regs, &dump->u_fp);
}

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process (i.e. the swapper or direct
 * descendants which haven't done an execve()) should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be freed until both the parent and
 * the child have exited.
 */
pid_t arch_kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	pid_t __ret;

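	/*
	 * Issue sys_clone directly.  The parent gets the child's pid
	 * back in r0 and branches out; the child sees r0 == 0, calls
	 * fn(arg) with a zeroed frame pointer, and exits via sys_exit.
	 */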
	__asm__ __volatile__(
	"orr	r0, %1, %2	@ kernel_thread sys_clone	\n\
	mov	r1, #0						\n\
	"__syscall(clone)"					\n\
	movs	%0, r0		@ if we are the child		\n\
	bne	1f						\n\
	mov	fp, #0		@ ensure that fp is zero	\n\
	mov	r0, %4						\n\
	mov	lr, pc						\n\
	mov	pc, %3						\n\
	b	sys_exit					\n\
1:	"
	: "=&r" (__ret)
	: "Ir" (flags), "I" (CLONE_VM), "r" (fn), "r" (arg)
	: "r0", "r1", "lr");
	return __ret;
}

/*
 * These bracket the sleeping functions..
 */
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched	((unsigned long) scheduling_functions_start_here)
#define last_sched	((unsigned long) scheduling_functions_end_here)

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, lr;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = 4096 + (unsigned long)p;
	fp = thread_saved_fp(&p->thread);
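	/*
	 * Walk the APCS frame chain: the saved lr sits just below fp
	 * and the caller's fp is at fp - 12.  Stop after 16 frames or
	 * if fp wanders off the task's kernel stack.
	 */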
	do {
		if (fp < stack_page || fp > 4092+stack_page)
			return 0;
		lr = pc_pointer(((unsigned long *)fp)[-1]);
		if (lr < first_sched || lr > last_sched)
			return lr;
		fp = *(unsigned long *)(fp - 12);
	} while (count++ < 16);

	return 0;
}