/*
 * arch/xtensa/kernel/process.c
 *
 * Xtensa Processor version.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/mmu.h>
#include <asm/irq.h>
#include <asm/atomic.h>
#include <asm/asm-offsets.h>
#include <asm/regs.h>

extern void ret_from_fork(void);

struct task_struct *current_set[NR_CPUS] = {&init_task, };

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

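/*
 * Coprocessor state is handled lazily: coprocessor_owner[] records the
 * thread_info that currently owns each coprocessor's register context,
 * and ti->cpenable caches that thread's CPENABLE bits.
 */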
#if XTENSA_HAVE_COPROCESSORS

void coprocessor_release_all(struct thread_info *ti)
{
	unsigned long cpenable;
	int i;

	/* Make sure we don't switch tasks during this operation. */

	preempt_disable();

	/* Walk through all coprocessors and release any owned by this thread. */

	cpenable = ti->cpenable;

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (coprocessor_owner[i] == ti) {
			coprocessor_owner[i] = 0;
			cpenable &= ~(1 << i);
		}
	}

	ti->cpenable = cpenable;
	coprocessor_clear_cpenable();

	preempt_enable();
}

void coprocessor_flush_all(struct thread_info *ti)
{
	unsigned long cpenable;
	int i;

	preempt_disable();

	cpenable = ti->cpenable;

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
			coprocessor_flush(ti, i);
		cpenable >>= 1;
	}

	preempt_enable();
}

#endif


/*
 * Power management idle function, if any is provided by the platform.
 */

void cpu_idle(void)
{
	local_irq_enable();

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
			platform_idle();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * This is called when the thread calls exit().
 */
void exit_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_release_all(current_thread_info());
#endif
}

/*
 * Flush thread state. This is called when a thread does an execve().
 * Note that we flush coprocessor registers in case the execve fails.
 */
void flush_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
	struct thread_info *ti = current_thread_info();
	coprocessor_flush_all(ti);
	coprocessor_release_all(ti);
#endif
}

/*
 * This is called before the thread is copied.
 */
void prepare_to_copy(struct task_struct *tsk)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_flush_all(task_thread_info(tsk));
#endif
}

/*
 * Copy thread.
 *
 * The stack layout for the new thread looks like this:
 *
 *	+------------------------+ <- sp in childregs (= tos)
 *	|       childregs        |
 *	+------------------------+ <- thread.sp = sp in dummy-frame
 *	|      dummy-frame       |    (saved in dummy-frame spill-area)
 *	+------------------------+
 *
 * We create a dummy frame to return to ret_from_fork:
 *   a0 points to ret_from_fork (simulating a call4)
 *   sp points to itself (thread.sp)
 *   a2, a3 are unused.
 *
 * Note: This is a pristine frame, so we don't need any spill region on top of
 *       childregs.
 */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct thread_info *ti;
	unsigned long tos;
	int user_mode = user_mode(regs);

	/* Set up the child's kernel stack and exception frame (childregs). */
	tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	if (user_mode)
		childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
	else
		childregs = (struct pt_regs*)tos - 1;

	*childregs = *regs;

	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
	*((int*)childregs - 3) = (unsigned long)childregs;
	*((int*)childregs - 4) = 0;

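	/*
	 * areg[1] is the child's stack pointer (replaced with usp below for
	 * user threads); areg[2] = 0 makes fork/clone return 0 in the child.
	 */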
	childregs->areg[1] = tos;
	childregs->areg[2] = 0;
	p->set_child_tid = p->clear_child_tid = NULL;
	p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
	p->thread.sp = (unsigned long)childregs;

	if (user_mode(regs)) {

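		/*
		 * wmask & ~0xf is the number of bytes of caller register
		 * frames spilled at the top of areg[] (compare the
		 * (wm >> 4) * 16 copy in xtensa_elf_core_copy_regs below);
		 * copy just those live entries from the parent.
		 */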
		int len = childregs->wmask & ~0xf;
		childregs->areg[1] = usp;
		memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
		       &regs->areg[XCHAL_NUM_AREGS - len/4], len);
		// FIXME: we need to set THREADPTR in thread_info...
		if (clone_flags & CLONE_SETTLS)
			childregs->areg[2] = childregs->areg[6];

	} else {
		/* In kernel space, we start a new thread with a new stack. */
		childregs->wmask = 1;
	}

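	/*
	 * The child starts with no coprocessors enabled (cpenable = 0);
	 * coprocessor state is then acquired lazily on first use.
	 */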
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	ti = task_thread_info(p);
	ti->cpenable = 0;
#endif

	return 0;
}


/*
 * These bracket the sleeping functions..
 */

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long sp, pc;
	unsigned long stack_page = (unsigned long) task_stack_page(p);
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.sp;
	pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);

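	/*
	 * Walk the saved call frames via their spill slots until we find a
	 * pc outside the scheduler, run off this task's stack, or give up.
	 */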
	do {
		if (sp < stack_page + sizeof(struct task_struct) ||
		    sp >= (stack_page + THREAD_SIZE) ||
		    pc == 0)
			return 0;
		if (!in_sched_functions(pc))
			return pc;

		/* Stack layout: sp-4: ra, sp-3: sp' */

		pc = MAKE_PC_FROM_RA(*((unsigned long *)sp - 4), sp);
		sp = *((unsigned long *)sp - 3);
	} while (count++ < 16);
	return 0;
}


/*
 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
 * of processor registers.  Besides different ordering,
 * xtensa_gregset_t contains non-live register information that
 * 'struct pt_regs' does not.  Exception handling (primarily) uses
 * 'struct pt_regs'.  Core files and ptrace use xtensa_gregset_t.
 */

void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
{
	unsigned long wb, ws, wm;
	int live, last;

	wb = regs->windowbase;
	ws = regs->windowstart;
	wm = regs->wmask;
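	/* Rotate windowstart so the current frame's bit lands at bit 0. */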
	ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);

	/* Don't leak any random bits. */

	memset(elfregs, 0, sizeof(*elfregs));

	/* Note: PS.EXCM is not set while user task is running; its
	 * being set in regs->ps is for exception handling convenience.
	 */

	elfregs->pc = regs->pc;
	elfregs->ps = (regs->ps & ~(1 << PS_EXCM_BIT));
	elfregs->lbeg = regs->lbeg;
	elfregs->lend = regs->lend;
	elfregs->lcount = regs->lcount;
	elfregs->sar = regs->sar;
	elfregs->windowstart = ws;

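	/*
	 * Bits 1-3 of wmask encode the size of the current (live) register
	 * window (4, 8 or 12 registers; otherwise all 16), and wmask >> 4
	 * counts the 4-register caller frames saved at the top of areg[].
	 * Copy both groups into the ELF register set.
	 */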
	live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
	last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
	memcpy(elfregs->a, regs->areg, live * 4);
	memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
}

int dump_fpu(void)
{
	return 0;
}

asmlinkage
long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
		  void __user *parent_tid, void *child_tls,
		  void __user *child_tid, long a5,
		  struct pt_regs *regs)
{
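	/* child_tls is not used here; see the THREADPTR FIXME in copy_thread(). */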
	if (!newsp)
		newsp = regs->areg[1];
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * xtensa_execve() executes a new program.
 */

asmlinkage
long xtensa_execve(const char __user *name,
		   const char __user *const __user *argv,
		   const char __user *const __user *envp,
		   long a3, long a4, long a5,
		   struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
out:
	return error;
}
