/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <linux/errno.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/percpu.h>
#include <asm/signal.h>

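/*
 * IRQ state helpers.  When the core is built with the optional
 * msrset/msrclr instructions (CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR)
 * the MSR bits can be flipped in a single instruction; otherwise fall
 * back to a read-modify-write of rmsr using r11 as scratch.
 */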
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	disable_irq
	msrclr r0, MSR_IE
	.endm

	.macro	enable_irq
	msrset r0, MSR_IE
	.endm

	.macro	clear_bip
	msrclr r0, MSR_BIP
	.endm
#else
	.macro	disable_irq
	mfs r11, rmsr
	andi r11, r11, ~MSR_IE
	mts rmsr, r11
	.endm

	.macro	enable_irq
	mfs r11, rmsr
	ori r11, r11, MSR_IE
	mts rmsr, r11
	.endm

	.macro	clear_bip
	mfs r11, rmsr
	andi r11, r11, ~MSR_BIP
	mts rmsr, r11
	.endm
#endif

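/*
 * Hardware interrupt entry.  Save the interrupted context into a pt_regs
 * frame on the kernel stack (switching stacks first if we came from user
 * mode), mark kernel mode in PER_CPU(KM) and call do_IRQ with the pt_regs
 * pointer in r5.  do_IRQ returns to ret_from_intr below.
 */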
ENTRY(_interrupt)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	beqid	r11, 1f
	nop
	brid	2f				/* jump over */
	addik	r1, r1, (-PT_SIZE)	/* room for pt_regs (delay slot) */
1:						/* switch to kernel stack */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	/* calculate kernel stack pointer */
	addik	r1, r1, THREAD_SIZE - PT_SIZE
2:
	swi	r11, r1, PT_MODE		/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3
	swi	r4, r1, PT_R4
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	swi	r14, r1, PT_R14
	swi	r14, r1, PT_PC
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31
	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* prepare the link register, the argument and jump */
	addik	r15, r0, ret_from_intr - 8
	addk	r6, r0, r15
	braid	do_IRQ
	add	r5, r0, r1

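/*
 * Return path from do_IRQ.  r15 was set to ret_from_intr - 8 above because
 * a C function returns with 'rtsd r15, 8'.  If we are going back to user
 * mode (PT_MODE == 0), handle any pending reschedule or signal first.
 */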
ret_from_intr:
	lwi	r11, r1, PT_MODE
	bneid	r11, no_intr_resched

	lwi	r6, r31, TS_THREAD_INFO	/* get thread info */
	lwi	r19, r6, TI_FLAGS	/* get flags in thread info */
				/* do extra work if any bits are set */

	andi	r11, r19, _TIF_NEED_RESCHED
	beqi	r11, 1f
	bralid	r15, schedule
	nop
1:	andi	r11, r19, _TIF_SIGPENDING
	beqid	r11, no_intr_resched
	addk	r5, r1, r0
	addk	r7, r0, r0
	bralid	r15, do_signal
	addk	r6, r0, r0

no_intr_resched:
	/* Disable interrupts; we are now committed to the state restore */
	disable_irq

	/* save mode indicator */
	lwi	r11, r1, PT_MODE
	swi	r11, r0, PER_CPU(KM)

	/* save r31 */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
restore_context:
	/* special purpose registers */
	lwi	r11, r1, PT_FSR
	mts	rfsr, r11
	lwi	r11, r1, PT_ESR
	mts	resr, r11
	lwi	r11, r1, PT_EAR
	mts	rear, r11
	lwi	r11, r1, PT_MSR
	mts	rmsr, r11

	lwi	r31, r1, PT_R31
	lwi	r30, r1, PT_R30
	lwi	r29, r1, PT_R29
	lwi	r28, r1, PT_R28
	lwi	r27, r1, PT_R27
	lwi	r26, r1, PT_R26
	lwi	r25, r1, PT_R25
	lwi	r24, r1, PT_R24
	lwi	r23, r1, PT_R23
	lwi	r22, r1, PT_R22
	lwi	r21, r1, PT_R21
	lwi	r20, r1, PT_R20
	lwi	r19, r1, PT_R19
	lwi	r18, r1, PT_R18
	lwi	r17, r1, PT_R17
	lwi	r16, r1, PT_R16
	lwi	r15, r1, PT_R15
	lwi	r14, r1, PT_PC
	lwi	r13, r1, PT_R13
	lwi	r12, r1, PT_R12
	lwi	r11, r1, PT_R11
	lwi	r10, r1, PT_R10
	lwi	r9, r1, PT_R9
	lwi	r8, r1, PT_R8
	lwi	r7, r1, PT_R7
	lwi	r6, r1, PT_R6
	lwi	r5, r1, PT_R5
	lwi	r4, r1, PT_R4
	lwi	r3, r1, PT_R3
	lwi	r2, r1, PT_R2
	lwi	r1, r1, PT_R1
	rtid	r14, 0
	nop

ENTRY(_reset)
	brai	0;

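/*
 * System call entry, reached via the user exception vector (brki r14, 0x08).
 * The syscall number arrives in r12 and the arguments in r5-r10 per the
 * MicroBlaze ABI; the result goes back in r3.  r14 holds the address of the
 * trapping instruction, so the saved PC is advanced by 4 to return past it.
 */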
ENTRY(_user_exception)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	beqid	r11, 1f				/* Already in kernel mode? */
	nop
	brid	2f				/* jump over */
	addik	r1, r1, (-PT_SIZE)	/* Room for pt_regs (delay slot) */
1:						/* Switch to kernel stack */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	/* calculate kernel stack pointer */
	addik	r1, r1, THREAD_SIZE - PT_SIZE
2:
	swi	r11, r1, PT_MODE		/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* save them on stack */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
	swi	r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	/* r12: _always_ in clobber list; see unistd.h */
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	/* r14: _always_ in clobber list; see unistd.h */
	swi	r14, r1, PT_R14
	/* but we want to return to the next inst. */
	addik	r14, r14, 0x4
	swi	r14, r1, PT_PC		/* increment by 4 and store in pc */
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31

	disable_irq
	nop		/* make sure IE bit is in effect */
	clear_bip	/* once IE is in effect it is safe to clear BIP */
	nop

	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* re-enable interrupts now we are in kernel mode */
	enable_irq

	/* See if the system call number is valid. */
	addi	r11, r12, -__NR_syscalls
	bgei	r11, 1f			/* return to user if not valid */
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
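	/* Each sys_call_table entry is a 32-bit pointer, so the two adds
	 * below multiply the syscall number by four to form the byte offset
	 * without needing bsll. */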
	add	r12, r12, r12			/* convert num -> ptr */
	add	r12, r12, r12
	lwi	r12, r12, sys_call_table	/* Get function pointer */
	addik	r15, r0, ret_to_user-8		/* set return address */
	bra	r12				/* Make the system call. */
	bri	0				/* won't reach here */
1:
	brid	ret_to_user			/* jump to syscall epilogue */
	addi	r3, r0, -ENOSYS			/* set errno in delay slot */

/*
 * Debug traps are like a system call, but entered via brki r14, 0x60.
 * All we need to do is send the SIGTRAP signal to current; ptrace and
 * do_signal will handle the rest.
 */
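/* The .init.ivt table at the end of this file places 'brai _debug_exception'
 * at offset 0x60, which is how the brki above lands here. */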
ENTRY(_debug_exception)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	addik	r1, r1, THREAD_SIZE - PT_SIZE	/* get the kernel stack */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
//save_context:
	swi	r11, r1, PT_MODE	/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* save them on stack */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
	swi	r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	/* r12: _always_ in clobber list; see unistd.h */
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	/* r14: _always_ in clobber list; see unistd.h */
	swi	r14, r1, PT_R14
	swi	r14, r1, PT_PC /* Will return to interrupted instruction */
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31

	disable_irq
	nop		/* make sure IE bit is in effect */
	clear_bip	/* once IE is in effect it is safe to clear BIP */
	nop

	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* re-enable interrupts now we are in kernel mode */
	enable_irq

	addi	r5, r0, SIGTRAP			/* sending the trap signal */
	add	r6, r0, r31			/* to current */
	bralid	r15, send_sig
	add	r7, r0, r0			/* 3rd param zero */

	/* Restore r3/r4 to work around how ret_to_user works */
	lwi	r3, r1, PT_R3
	lwi	r4, r1, PT_R4
	bri	ret_to_user

ENTRY(_break)
	bri	0

/* struct task_struct *_switch_to(struct thread_info *prev,
					struct thread_info *next); */
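/* Per the MicroBlaze ABI, prev arrives in r5 and next in r6, and the return
 * value (the outgoing task_struct, still in r31) is handed back in r3. */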
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, r31

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR

	/* update r31, the current */
	lwi	r31, r6, TI_TASK
	swi	r31, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	addik	r11, r6, TI_CPU_CONTEXT

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_ESR
	mts	resr, r12
	lwi	r12, r11, CC_EAR
	mts	rear, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	rtsd	r15, 8
	nop

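/*
 * Newly created tasks resume here after their first context switch.
 * schedule_tail() finishes the handover, then the child's syscall return
 * value (r3) is forced to zero before dropping into ret_to_user.
 */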
ENTRY(ret_from_fork)
	addk	r5, r0, r3
	addk	r6, r0, r1
	brlid	r15, schedule_tail
	nop
	swi	r31, r1, PT_R31		/* save r31 in user context. */
			/* will soon be restored to r31 in ret_to_user */
	addk	r3, r0, r0
	brid	ret_to_user
	nop

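/* Slow path out of ret_to_user: interrupts are re-enabled while we
 * reschedule and/or deliver pending signals, then we fall back in below. */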
work_pending:
	enable_irq

	andi	r11, r19, _TIF_NEED_RESCHED
	beqi	r11, 1f
	bralid	r15, schedule
	nop
1:	andi	r11, r19, _TIF_SIGPENDING
	beqi	r11, no_work_pending
	addk	r5, r1, r0
	addik	r7, r0, 1
	bralid	r15, do_signal
	addk	r6, r0, r0
	bri	no_work_pending

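/*
 * Common return-to-user path for syscalls and the debug trap.  The return
 * value in r3/r4 is written back into pt_regs, pending work (reschedule,
 * signals) is handled via work_pending above, and finally the whole frame
 * is restored and we return with rtid through the saved PC.
 */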
ENTRY(ret_to_user)
	disable_irq

	swi	r4, r1, PT_R4		/* return val */
	swi	r3, r1, PT_R3		/* return val */

	lwi	r6, r31, TS_THREAD_INFO /* get thread info */
	lwi	r19, r6, TI_FLAGS /* get flags in thread info */
	bnei	r19, work_pending /* do extra work if any bits are set */
no_work_pending:
	disable_irq

	/* save r31 */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* save mode indicator */
	lwi	r18, r1, PT_MODE
	swi	r18, r0, PER_CPU(KM)
//restore_context:
	/* special purpose registers */
	lwi	r18, r1, PT_FSR
	mts	rfsr, r18
	lwi	r18, r1, PT_ESR
	mts	resr, r18
	lwi	r18, r1, PT_EAR
	mts	rear, r18
	lwi	r18, r1, PT_MSR
	mts	rmsr, r18

	lwi	r31, r1, PT_R31
	lwi	r30, r1, PT_R30
	lwi	r29, r1, PT_R29
	lwi	r28, r1, PT_R28
	lwi	r27, r1, PT_R27
	lwi	r26, r1, PT_R26
	lwi	r25, r1, PT_R25
	lwi	r24, r1, PT_R24
	lwi	r23, r1, PT_R23
	lwi	r22, r1, PT_R22
	lwi	r21, r1, PT_R21
	lwi	r20, r1, PT_R20
	lwi	r19, r1, PT_R19
	lwi	r18, r1, PT_R18
	lwi	r17, r1, PT_R17
	lwi	r16, r1, PT_R16
	lwi	r15, r1, PT_R15
	lwi	r14, r1, PT_PC
	lwi	r13, r1, PT_R13
	lwi	r12, r1, PT_R12
	lwi	r11, r1, PT_R11
	lwi	r10, r1, PT_R10
	lwi	r9, r1, PT_R9
	lwi	r8, r1, PT_R8
	lwi	r7, r1, PT_R7
	lwi	r6, r1, PT_R6
	lwi	r5, r1, PT_R5
	lwi	r4, r1, PT_R4		/* return val */
	lwi	r3, r1, PT_R3		/* return val */
	lwi	r2, r1, PT_R2
	lwi	r1, r1, PT_R1

	rtid	r14, 0
	nop

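/* The wrappers below pass the pt_regs pointer (current r1) as an extra
 * argument to the C implementation, loaded in the branch delay slot. */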
sys_vfork:
	brid	microblaze_vfork
	addk	r5, r1, r0

sys_clone:
	brid	microblaze_clone
	addk	r7, r1, r0

sys_execve:
	brid	microblaze_execve
	addk	r8, r1, r0

sys_rt_sigreturn_wrapper:
	brid	sys_rt_sigreturn
	addk	r5, r1, r0

sys_rt_sigsuspend_wrapper:
	brid	sys_rt_sigsuspend
	addk	r7, r1, r0

	/* Interrupt vector table */
	.section	.init.ivt, "ax"
	.org 0x0
	brai	_reset
	brai	_user_exception
	brai	_interrupt
	brai	_break
	brai	_hw_exception_handler
	.org 0x60
	brai	_debug_exception

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

	/*
	 * Trap decoding for stack unwinder
	 * Tuples are (start addr, end addr, string)
	 * If return address lies in [start addr, end addr],
	 * unwinder displays 'string'
	 */

	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_to_user  ; .word ret_to_user    ; .word type_SYSCALL
	.word ret_from_intr; .word ret_from_intr  ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
	.word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0             ; .word 0               ; .word 0