/*
 *  arch/ppc64/kernel/entry.S
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include "ppc_asm.h"
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/config.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif
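
/*
 * On iSeries, interrupt disabling is done in software rather than by
 * clearing MSR_EE directly: the PACAPROCENABLED byte in the PACA tracks
 * the soft-enable state, and SOFTE(r1) holds the state to restore on
 * exception return.  (Summary of how DO_SOFT_DISABLE is used in this
 * file; see the iSeries blocks in ret_from_except and restore below.)
 */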

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

#ifdef SHOW_SYSCALLS_TASK
	.data
show_syscalls_task:
	.long	-1
#endif

/*
 * Handle a system call.
 */
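/*
 * Calling convention, as consumed below: the syscall number arrives in
 * r0 and the arguments in r3-r8; r1 points at the exception frame built
 * by the low-level entry code.  The handler's return value is written
 * back to GPR3(r1), with errors signalled to userspace by negating the
 * value and setting the SO bit in CR0.
 *
 * Illustrative only (not part of this file): a getpid() call from
 * userspace arrives here as roughly
 *	li	r0,__NR_getpid
 *	sc
 * with the result read back from r3 and CR0.SO checked for failure.
 */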
	.text
_GLOBAL(DoSyscall)
	ld	r11,_CCR(r1)	/* Clear SO bit in CR */
	lis	r10,0x1000
	andc	r11,r11,r10
	std	r11,_CCR(r1)
	ld	r10,PACACURRENT(r13)
	std	r0,THREAD+LAST_SYSCALL(r10)
#ifdef SHOW_SYSCALLS
# ifdef SHOW_SYSCALLS_TASK
	LOADBASE(r31,show_syscalls_task)
	ld	r31,show_syscalls_task@l(r31)
	cmp	0,r10,r31
	bne	1f
# endif /* SHOW_SYSCALLS_TASK */
	LOADADDR(r3,7f)
	ld	r4,GPR0(r1)
	ld	r5,GPR3(r1)
	ld	r6,GPR4(r1)
	ld	r7,GPR5(r1)
	ld	r8,GPR6(r1)
	ld	r9,GPR7(r1)
	bl	.printk
	LOADADDR(r3,77f)
	ld	r4,GPR8(r1)
	ld	r5,GPR9(r1)
	ld	r6,PACACURRENT(r13)
	bl	.printk
	ld	r0,GPR0(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
1:
#endif /* SHOW_SYSCALLS */
	ld	r11,TASK_PTRACE(r10)
	andi.	r11,r11,PT_TRACESYS
	bne-	50f
	cmpli	0,r0,NR_syscalls
	bge-	66f
/* Ken Aaker: we need to vector to the 32-bit or the default
 * sys_call_table here, based on the caller's run-mode / personality.
 */
#ifdef CONFIG_BINFMT_ELF32
	ld	r11,THREAD+THREAD_FLAGS(r10)
	andi.	r11,r11,PPC_FLAG_32BIT
	beq-	15f
	LOADADDR(r11,.sys_call_table32)
/* Zero-extend the first four parameters: make sure the high bits
 * (the most significant 32 bits of each 64-bit register) are clear
 * in the first four parameter registers (r3-r6).
 */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
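/*
 * Example of why this matters: a 32-bit task passing (unsigned int)-1
 * may leave it sign-extended to 0xffffffffffffffff in the full 64-bit
 * register; clrldi guarantees the handler sees 0x00000000ffffffff
 * instead.  Arguments beyond the fourth are presumably cleaned up by
 * the individual 32-bit syscall wrappers as needed.
 */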
	b	17f
15:
#endif
	LOADADDR(r11,.sys_call_table)
17:
	slwi	r0,r0,3		/* Index into the table of 8-byte pointers */
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
_GLOBAL(ret_from_syscall_1)
20:	std	r3,RESULT(r1)	/* Save result */
#ifdef SHOW_SYSCALLS
# ifdef SHOW_SYSCALLS_TASK
	ld	r10,PACACURRENT(r13)
	cmp	0,r10,r31
	bne	91f
# endif /* SHOW_SYSCALLS_TASK */
	mr	r4,r3
	LOADADDR(r3,79f)
	bl	.printk
	ld	r3,RESULT(r1)
91:
#endif /* SHOW_SYSCALLS */
	li	r10,-_LAST_ERRNO
	cmpl	0,r3,r10
	blt	30f
	neg	r3,r3
	cmpi	0,r3,ERESTARTNOHAND
	bne	22f
	li	r3,EINTR
22:	ld	r10,_CCR(r1)	/* Set SO bit in CR */
	oris	r10,r10,0x1000
	std	r10,_CCR(r1)
30:	std	r3,GPR3(r1)	/* Update return value */
	b	.ret_from_except
66:	li	r3,ENOSYS
	b	22b
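/*
 * Worked example of the error path above: a handler returning -ENOENT
 * stores -2 in RESULT(r1).  Viewed as an unsigned value, -2 is not
 * below -_LAST_ERRNO, so it is treated as an error: r3 is negated to 2,
 * the SO bit is set in the saved CR, and 2 is written back to GPR3(r1).
 * Userspace (e.g. glibc) sees CR0.SO set and stores the positive value
 * into errno.
 */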

/* Traced system call support */
50:	bl	.syscall_trace
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	ld	r9,GPR9(r1)
	cmpli	0,r0,NR_syscalls
	bge-	66f
#ifdef CONFIG_BINFMT_ELF32
	ld	r10,PACACURRENT(r13)
	ld	r10,THREAD+THREAD_FLAGS(r10)
	andi.	r10,r10,PPC_FLAG_32BIT
	beq-	55f
	LOADADDR(r10,.sys_call_table32)
/* Zero-extend the first four parameters: make sure the high bits
 * (the most significant 32 bits of each 64-bit register) are clear
 * in the first four parameter registers (r3-r6).
 */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	b	57f
55:
#endif
	LOADADDR(r10,.sys_call_table)
57:
	slwi	r0,r0,3		/* Index into the table of 8-byte pointers */
	ldx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
_GLOBAL(ret_from_syscall_2)
58:	std	r3,RESULT(r1)	/* Save result */
	li	r10,-_LAST_ERRNO
	cmpl	0,r3,r10
	blt	60f
	neg	r3,r3
	cmpi	0,r3,ERESTARTNOHAND
	bne	57f
	li	r3,EINTR
57:	ld	r10,_CCR(r1)	/* Set SO bit in CR */
	oris	r10,r10,0x1000
	std	r10,_CCR(r1)
60:	std	r3,GPR3(r1)	/* Update return value */
	bl	.syscall_trace
	b	.ret_from_except
66:	li	r3,ENOSYS
	b	57b
#ifdef SHOW_SYSCALLS
7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x, %x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0
#endif

_GLOBAL(ppc32_sigreturn)
	bl	.sys32_sigreturn
	b	80f

_GLOBAL(ppc32_rt_sigreturn)
	bl	.sys32_rt_sigreturn
	b	80f

_GLOBAL(ppc64_sigreturn)
	bl	.sys_sigreturn
	b	80f

_GLOBAL(ppc64_rt_sigreturn)
	bl	.sys_rt_sigreturn
	/* fall through */

80:	ld	r10,PACACURRENT(r13)
	ld	r10,TASK_PTRACE(r10)
	andi.	r10,r10,PT_TRACESYS
	bne-	81f
	cmpi	0,r3,0
	bge	.ret_from_except
	b	20b
81:	cmpi	0,r3,0
	blt	58b
	bl	.syscall_trace
	b	.ret_from_except

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task and r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork", which must set up an environment equivalent
 * to the "_switch" path.  If you change this (or in particular, the
 * SAVE_REGS macro), you'll have to change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc64/kernel/process.c.
 */
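/*
 * Roughly, the C-level caller in the scheduler does (illustrative
 * prototype, not the exact declaration):
 *
 *	prev = _switch(&prev->thread, &new->thread);
 *
 * The previous 'current' comes back in r3, and execution resumes in
 * the new task at whatever NIP it saved when it was last switched out.
 */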
_GLOBAL(_switch)
	stdu	r1,-INT_FRAME_SIZE(r1)
	ld	r6,0(r1)
	std	r6,GPR1(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_GPR(2, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to _switch caller */
	mfmsr	r22
	li	r6,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r6,r6,MSR_VEC@h	/* Disable altivec */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	andc	r22,r22,r6
	mtmsrd	r22
	isync
	std	r20,_NIP(r1)
	std	r22,_MSR(r1)
	std	r20,_LINK(r1)
	mfcr	r20
	std	r20,_CCR(r1)
	li	r6,0x0ff0	/* Pseudo-trap value: marks a _switch frame,
				 * not a real exception */
	std	r6,TRAP(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

	addi	r3,r3,-THREAD	/* old 'current' for return value */
	addi	r7,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r7,PACACURRENT(r13)	/* Set new 'current' */

#ifdef CONFIG_PPC_ISERIES
	ld	r7,THREAD_FLAGS(r4)	/* Get run light flag */
	mfspr	r9,CTRLF
	srdi	r7,r7,1		/* Align to run light bit in CTRL reg */
	insrdi	r9,r7,1,63	/* Insert run light into CTRL */
	mtspr	CTRLT,r9
#endif
	ld	r1,KSP(r4)	/* Load new stack pointer */
	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6
	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	ld	r1,GPR1(r1)
	mtlr	r7
	blr

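/*
 * A newly forked child first runs here: copy_thread() (see the comment
 * above _switch) presumably arranges for _switch to "return" to
 * ret_from_fork in the child, which finishes scheduler bookkeeping and
 * then takes the normal exception-return path, via syscall_trace if the
 * child is being traced.
 */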
_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	ld	r4,PACACURRENT(r13)
	ld	r0,TASK_PTRACE(r4)
	andi.	r0,r0,PT_TRACESYS
	beq+	.ret_from_except
	bl	.syscall_trace
	b	.ret_from_except

_GLOBAL(ret_from_except)
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	4f
irq_recheck:
	/* Check for pending interrupts (iSeries) */
	CHECKANYINT(r3,r4)
	beq+	4f	/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	irq_recheck	/* loop back and handle more */
4:
#endif
_GLOBAL(do_bottom_half_ret)
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq+	restore		/* no: skip the checks, just restore regs */
_GLOBAL(ret_to_user_hook)
	nop
	/* NEED_RESCHED is a volatile long (64 bits) */
	ld	r3,PACACURRENT(r13)
	ld	r3,NEED_RESCHED(r3)
	cmpi	0,r3,0		/* check need_resched flag */
	beq+	7f
	bl	.schedule
	/* SIGPENDING is an int (32 bits) */
7:
	ld	r5,PACACURRENT(r13)
	lwz	r5,SIGPENDING(r5) /* Check for pending unblocked signals */
	cmpwi	0,r5,0
	beq+	restore
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
_GLOBAL(do_signal_ret)
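/*
 * 'restore' rebuilds the interrupted context from the exception frame.
 * The critical ordering: interrupts (and RI) are hard-disabled before
 * SRR0/SRR1 are loaded, since taking any exception after that point
 * would overwrite them; rfid then restores MSR and NIP atomically.
 */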
restore:
	ld	r3,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r3
	mtlr	r0
	ld	r3,_XER(r1)
	mtspr	XER,r3

	REST_8GPRS(5, r1)
	REST_10GPRS(14, r1)
	REST_8GPRS(24, r1)

	/* make sure we hard disable here, even if rtl is active, to protect
	 * SRR[01] and SPRG2 -- Cort
	 */
	mfmsr	r0		/* Get current interrupt state */
	li	r4,0
	ori	r4,r4,MSR_EE|MSR_RI
	andc	r0,r0,r4	/* clear MSR_EE and MSR_RI */
	mtmsrd	r0		/* Update machine state */
#ifdef CONFIG_PPC_ISERIES
	ld	r0,SOFTE(r1)
	cmpi	0,r0,0
	beq+	1f

	/* An interrupt arrived while we were hard-disabled: re-enable
	 * and go handle it before completing the return.
	 */
	CHECKANYINT(r4,r3)
	beq+	1f
	mfmsr	r0
	ori	r0,r0,MSR_EE|MSR_RI
	mtmsrd	r0
	b	irq_recheck

1:
#endif
	stdcx.	r0,0,r1		/* to clear the reservation */

#ifdef DO_SOFT_DISABLE
	ld	r0,SOFTE(r1)
	stb	r0,PACAPROCENABLED(r13)
#endif
	/* if returning to user mode, save kernel SP */
	ld	r0,_MSR(r1)
	andi.	r0,r0,MSR_PR
	beq+	1f
	ld	r4,PACACURRENT(r13)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD+THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if GPUL, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	addi	r0,r1,INT_FRAME_SIZE	/* size of frame */
	std	r0,THREAD+KSP(r4)	/* save kernel stack pointer */
	std	r1,PACAKSAVE(r13)	/* save exception stack pointer */
	REST_GPR(13,r1)
1:
	mfmsr	r0		/* Clear RI: SRR0/1 are about to go live */
	li	r2,MSR_RI
	andc	r0,r0,r2
	mtmsrd	r0,1

	ld	r0,_MSR(r1)
	mtspr	SRR1,r0
	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SRR0,r2
	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid

/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32-bit mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
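/*
 * The C caller (rtas_call() and friends) passes the address of the RTAS
 * argument block in r3.  Since RTAS runs with the MMU off, this is
 * presumably a real (physical) address; enter_rtas itself just carries
 * it through untouched across the mode switch.
 */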
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32-bit mode, it clobbers the high order
	 * half of all registers that it saves.  We therefore save those
	 * registers RTAS might touch to the stack.  (r0, r3-r13 are caller
	 * saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save current */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA (SPRG3) which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	mfmsr	r6
	std	r6,PACASAVEDMSR(r13)

	/* Set up our real return addr */
	SET_REG_TO_LABEL(r4,.rtas_return_loc)
	SET_REG_TO_CONST(r9,KERNELBASE)
	sub	r4,r4,r9
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
	andc	r6,r0,r9
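/*
 * At this point r0 holds the current MSR with EE/SE/BE/RI cleared
 * (used immediately below so the SRR0/1 setup cannot be interrupted),
 * and r6 additionally drops SF, IR, DR, the FP bits and RI -- the
 * 32-bit, real-mode MSR that RTAS will actually run with, delivered
 * through SRR1 by the rfid.
 */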
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	SET_REG_TO_LABEL(r4,rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SRR0,r5
	mtspr	SRR1,r6
	rfid

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r13,SPRG3		/* Get PACA */
	SET_REG_TO_CONST(r5,KERNELBASE)
	sub	r13,r13,r5		/* RELOC the PACA base pointer */

	mfmsr	r6			/* Clear RI while SRR0/1 are live */
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r13)		/* Restore our SP */
	LOADADDR(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r13)	/* Restore our MSR */

	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore current */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	/* put back paca in r13 */
	mfspr	r13,SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

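/*
 * enter_prom calls back into the Open Firmware client interface (the
 * boot "prom").  Like RTAS it must run in 32-bit mode, so the MSR is
 * switched below and the kernel stack pointer is stashed in SPRG2
 * until the firmware returns.
 */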
_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32-bit mode, it clobbers the high order
	 * half of all registers that it saves.  We therefore save those
	 * registers PROM might touch to the stack.  (r0, r3-r13 are caller
	 * saved)
	 */
	SAVE_8GPRS(2, r1)		/* Save the TOC & incoming param(s) */
	SAVE_GPR(13, r1)		/* Save current */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Unfortunately, the stack pointer is also clobbered, so it is saved
	 * in SPRG2, which allows us to restore our original state after
	 * PROM returns.
	 */
	mtspr	SPRG2,r1

	/* put a relocation offset into r3 */
	bl	.reloc_offset
	LOADADDR(r12,prom)
	sub	r12,r12,r3
	ld	r12,PROMENTRY(r12)	/* get the prom->entry value */
	mtlr	r12

	/* Clear MSR_SF and MSR_ISF: drop to 32-bit mode for the PROM call */
	mfmsr	r11			/* grab the current MSR */
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	REST_8GPRS(2, r1)		/* Restore the TOC & param(s) */
	REST_GPR(13, r1)		/* Restore current */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */
	blrl				/* Entering PROM here... */

	mfspr	r1,SPRG2		/* Restore the stack pointer */
	ld	r6,_MSR(r1)		/* Restore the MSR */
	mtmsrd	r6
	isync

	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore current */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10
	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */