/* $Id: process.c,v 1.24 2003/03/06 14:19:32 pkj Exp $
 *
 * linux/arch/cris/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000, 2001 Axis Communications AB
 *
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *
 * $Log: process.c,v $
 * Revision 1.24  2003/03/06 14:19:32  pkj
 * Use a cpu_idle() function identical to the one used by i386.
 *
 * Revision 1.23  2002/10/14 18:29:27  johana
 * Call etrax_gpio_wake_up_check() from cpu_idle() to reduce gpio latency
 * from ~15 ms to ~6 ms.
 *
 * Revision 1.22  2001/11/13 09:40:43  orjanf
 * Added dump_fpu (needed for core dumps).
 *
 * Revision 1.21  2001/11/12 18:26:21  pkj
 * Fixed compiler warnings.
 *
 * Revision 1.20  2001/10/03 08:21:39  jonashg
 * cause_of_death does not exist if CONFIG_SVINTO_SIM is defined.
 *
 * Revision 1.19  2001/09/26 11:52:54  bjornw
 * INIT_MMAP is gone in 2.4.10
 *
 * Revision 1.18  2001/08/21 21:43:51  hp
 * Move last watchdog fix inside #ifdef CONFIG_ETRAX_WATCHDOG
 *
 * Revision 1.17  2001/08/21 13:48:01  jonashg
 * Added fix by HP to avoid oops when doing a hard_reset_now.
 *
 * Revision 1.16  2001/06/21 02:00:40  hp
 * * entry.S: Include asm/unistd.h.
 * (_sys_call_table): Use section .rodata, not .data.
 * (_kernel_thread): Move from...
 * * process.c: ... here.
 * * entryoffsets.c (VAL): Break out from...
 * (OF): Use VAL.
 * (LCLONE_VM): New asmified value from CLONE_VM.
 *
 * Revision 1.15  2001/06/20 16:31:57  hp
 * Add comments to describe empty functions according to review.
 *
 * Revision 1.14  2001/05/29 11:27:59  markusl
 * Fixed so that hard_reset_now will do reset even if watchdog wasn't enabled
 *
 * Revision 1.13  2001/03/20 19:44:06  bjornw
 * Use the 7th syscall argument for regs instead of current_regs
 *
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#define __KERNEL_SYSCALLS__
#include <stdarg.h>

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/elfcore.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>

#include <linux/smp.h>

//#define DEBUG

/*
 * Initial task structure. Make this a per-architecture thing,
 * because different architectures tend to have different
 * alignment requirements and potentially different initial
 * setup.
 */

static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM(init_mm);

/*
 * Initial task structure.
 *
 * We need to make sure that this is 8192-byte aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry..
 */

union task_union init_task_union
	__attribute__((__section__(".data.init_task"))) =
		{ INIT_TASK(init_task_union.task) };
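
/*
 * A rough sketch (an assumption about the architecture's linker script,
 * not part of this file) of how that alignment is achieved; the exact
 * vmlinux.lds contents may differ:
 *
 *	. = ALIGN(8192);
 *	*(.data.init_task)
 */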

/*
 * The hlt_counter, disable_hlt and enable_hlt are just here as a hook if
 * there would ever be a halt sequence (for power save when idle) with
 * some largish delay when halting or resuming *and* a driver that can't
 * afford that delay. The hlt_counter would then be checked before
 * executing the halt sequence, and the driver marks the unhaltable
 * region by disable_hlt/enable_hlt (see the usage sketch below the two
 * functions).
 */

static int hlt_counter;

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);

/*
 * Power off function, if any
 */
void (*pm_power_off)(void);
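
/*
 * A minimal sketch (hypothetical driver code, not part of this file) of
 * how a board or power-management driver would install these hooks; the
 * names my_board_idle and my_board_power_off are made up for illustration:
 *
 *	static void my_board_idle(void)
 *	{
 *		// e.g. enter a low-power wait until the next interrupt
 *	}
 *
 *	pm_idle = my_board_idle;	   // picked up by cpu_idle() below
 *	pm_power_off = my_board_power_off; // called by machine_power_off()
 */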

void disable_hlt(void)
{
	hlt_counter++;
}

void enable_hlt(void)
{
	hlt_counter--;
}
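
/*
 * A minimal usage sketch (hypothetical driver code): a driver that
 * cannot afford the halt/resume delay brackets its latency-sensitive
 * region like this, so that any future halt sequence would be skipped
 * while the region is active:
 *
 *	disable_hlt();		// halting not allowed from here...
 *	do_time_critical_io();	// hypothetical latency-sensitive work
 *	enable_hlt();		// ...to here
 */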

/*
 * We use this if we don't have any better
 * idle routine..
 */
static void default_idle(void)
{
#ifdef CONFIG_ETRAX_GPIO
	extern void etrax_gpio_wake_up_check(void); /* drivers/gpio.c */

	/* This can reduce gpio latency from ~15 ms to ~6 ms */
	etrax_gpio_wake_up_check();
#endif
}

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	init_idle();
	current->nice = 20;
	current->counter = -100;

	while (1) {
		void (*idle)(void) = pm_idle;
		if (!idle)
			idle = default_idle;
		while (!current->need_resched)
			idle();
		schedule();
		check_pgt_cache();
	}
}

/*
 * If the watchdog is enabled, we can simply disable interrupts and go
 * into an eternal loop, and the watchdog will reset the CPU after 0.1 s.
 * If, on the other hand, the watchdog wasn't enabled, we just enable it
 * and wait.
 */

void hard_reset_now(void)
{
	/*
	 * Don't declare this variable elsewhere. We don't want any other
	 * code to know about it than the watchdog handler in entry.S and
	 * this code, implementing hard reset through the watchdog.
	 */
#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
	extern int cause_of_death;
#endif

	printk("*** HARD RESET ***\n");
	cli();

#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
	cause_of_death = 0xbedead;
#else
	/* Since we don't plan to keep on resetting the watchdog,
	 * the key can be arbitrary; hence three. */
	*R_WATCHDOG = IO_FIELD(R_WATCHDOG, key, 3) |
		      IO_STATE(R_WATCHDOG, enable, start);
#endif

	while (1) /* waiting for RETRIBUTION! */ ;
}

void machine_restart(void)
{
	hard_reset_now();
}

/*
 * Similar to machine_power_off, but don't shut off power. Add code
 * here to freeze the system for e.g. post-mortem debug purposes when
 * possible. This halt has nothing to do with the idle halt.
 */

void machine_halt(void)
{
}

/* If or when software power-off is implemented, add code here. */

void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
}

/*
 * When a process does an "exec", machine state like FPU and debug
 * registers need to be reset. This is a hook function for that.
 * Currently we don't have any such state to reset, so this is empty.
 */

void flush_thread(void)
{
}

asmlinkage void ret_from_sys_call(void);

/*
 * Set up the child's kernel stack with a pt_regs and switch_stack on it.
 * It will be un-nested during _resume and _ret_from_sys_call when the
 * new thread is scheduled.
 *
 * Also set up the thread switching structure which is used to keep
 * thread-specific data during _resumes.
 */
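
/*
 * Rough picture (derived from the code below) of the child's kernel
 * stack once copy_thread() is done; remember that the task_struct and
 * the kernel stack share one 8 kB task union:
 *
 *	+------------------+ <- top of the 8 kB task union
 *	|     pt_regs      | <- childregs = user_regs(p)
 *	+------------------+
 *	|   switch_stack   | <- swstack; p->thread.ksp points here
 *	+------------------+
 *	|       ...        |    free kernel stack, grows downwards
 *	+------------------+
 *	|   task_struct    | <- bottom of the union
 *	+------------------+
 */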

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct switch_stack *swstack;

	/* Put the pt_regs structure at the end of the new kernel stack page
	 * and fix it up. Remember that the task_struct doubles as the kernel
	 * stack for the task.
	 */

	childregs = user_regs(p);

	*childregs = *regs;	/* struct copy of pt_regs */

	childregs->r10 = 0;	/* child returns 0 after a fork/clone */

	/* put the switch stack right below the pt_regs */

	swstack = ((struct switch_stack *)childregs) - 1;

	swstack->r9 = 0; /* parameter to ret_from_sys_call, 0 == don't restart the syscall */

	/* we want to return into ret_from_sys_call after the _resume */

	swstack->return_ip = (unsigned long) ret_from_sys_call;

	/* fix the user-mode stackpointer */

	p->thread.usp = usp;

	/* and the kernel-mode one */

	p->thread.ksp = (unsigned long) swstack;

#ifdef DEBUG
	printk("copy_thread: new regs at 0x%p, as shown below:\n", childregs);
	show_registers(childregs);
#endif

	return 0;
}

/*
 * Fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs *regs, struct user *dump)
{
#if 0
	int i;

	/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	for (i = 0; i < 8; i++)
		dump->u_debugreg[i] = current->debugreg[i];

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->regs = *regs;

	dump->u_fpvalid = dump_fpu(regs, &dump->i387);
#endif
}

/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	return 0;
}

/*
 * Be aware of the "magic" 7th argument in the four system-calls below.
 * They need the latest stackframe, which is put as the 7th argument by
 * entry.S. The previous arguments are dummies or actually used, but need
 * to be defined to reach the 7th argument.
 *
 * N.B.: Another method to get the stackframe is to use current_regs(). But
 * it returns the latest stack-frame stacked when going from _user mode_ and
 * some of these (at least sys_clone) are called from kernel-mode sometimes
 * (for example during kernel_thread, nowadays in entry.S) and thus cannot
 * use it. Thus, to be sure not to get any surprises, we use the method for
 * the other calls as well.
 */

asmlinkage int sys_fork(long r10, long r11, long r12, long r13, long mof, long srp,
			struct pt_regs *regs)
{
	return do_fork(SIGCHLD, rdusp(), regs, 0);
}

/* if newusp is 0, we just grab the old usp */

asmlinkage int sys_clone(unsigned long newusp, unsigned long flags,
			 long r12, long r13, long mof, long srp,
			 struct pt_regs *regs)
{
	if (!newusp)
		newusp = rdusp();
	return do_fork(flags, newusp, regs, 0);
}

/* vfork is a system call on i386 because of register-pressure - maybe
 * we can remove it and handle it in libc but we put it here until then.
 */

asmlinkage int sys_vfork(long r10, long r11, long r12, long r13, long mof, long srp,
			 struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(const char *fname, char **argv, char **envp,
			  long r13, long mof, long srp,
			  struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname(fname);
	error = PTR_ERR(filename);

	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
 out:
	return error;
}

/*
 * These bracket the sleeping functions..
 */

extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched	((unsigned long) scheduling_functions_start_here)
#define last_sched	((unsigned long) scheduling_functions_end_here)

unsigned long get_wchan(struct task_struct *p)
{
#if 0
	/* YURGH. TODO. */

	unsigned long ebp, esp, eip;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)p;
	esp = p->thread.esp;
	if (!stack_page || esp < stack_page || esp > 8188+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes ebp last. */
	ebp = *(unsigned long *) esp;
	do {
		if (ebp < stack_page || ebp > 8184+stack_page)
			return 0;
		eip = *(unsigned long *) (ebp+4);
		if (eip < first_sched || eip >= last_sched)
			return eip;
		ebp = *(unsigned long *) ebp;
	} while (count++ < 16);
#endif
	return 0;
}
#undef last_sched
#undef first_sched