/*
 *  arch/ppc64/kernel/head.S
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  VMX/AltiVec port from ppc32 (C) IBM 2003
 *   Denis Joseph Barrow (dj@de.ibm.com,barrow_dj@yahoo.com)
 *   additional debugging & 2.4-2.5 VMX port
 *   Ben Herrenschmidt (benh@kernel.crashing.org)
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC-64 platform, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#define SECONDARY_PROCESSORS

#include "ppc_asm.h"
#include "ppc_defs.h"
#include <asm/processor.h>
#include <asm/page.h>
#include <linux/config.h>
#include <asm/mmu.h>
#include <asm/perfmon.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/* Copy the saved SOFTE bit, or the EE bit from the saved MSR,
 * depending on whether we are doing soft-disable or not.
 */
#ifdef DO_SOFT_DISABLE
#define DO_COPY_EE()	ld	r20,SOFTE(r1)
#else
#define DO_COPY_EE()	rldicl	r20,r23,49,63
#endif
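
/*
 * Illustrative sketch (not part of the build): in C, the two variants
 * of DO_COPY_EE() compute roughly the following, where 'msr' is the
 * MSR value saved in r23 and MSR_EE is the 0x8000 bit:
 *
 *	if (soft_disable)
 *		ee = regs->softe;	// previously saved soft-enable state
 *	else
 *		ee = (msr >> 15) & 1;	// what rldicl r20,r23,49,63 extracts
 *
 * Either way, r20 ends up holding the interrupt-enable value that
 * save_remaining_regs will later reinstate.
 */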

/*
 * hcall interface to pSeries LPAR
 */
#define HSC .long 0x44000022
#define H_SET_ASR		0x30

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x3fff : Interrupt support
 * 0x4000 - 0x4fff : NACA
 * 0x5000 - 0x5fff : SystemCfg
 * 0x6000          : iSeries and common interrupt prologs
 * 0x9000 - 0x9fff : Initial segment table
 */

/*
 *   SPRG Usage
 *
 *   Register          Definition
 *
 *   SPRG0             reserved for hypervisor
 *   SPRG1             temp - used to save gpr
 *   SPRG2             temp - used to save gpr
 *   SPRG3             virt addr of paca
 */

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at SystemReset_Iseries
 */

	.text
	.globl  _stext
_stext:
_STATIC(__start)
	b .__start_initialization_pSeries
	.long  0x0

	. = 0x8
	.globl	__zero
__zero:
	.llong	0

	/* At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor. */
	. = 0x20
	.llong hvReleaseData-KERNELBASE

	/* At offsets 0x28 and 0x30 are offsets to the msChunks
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger). */
	.llong msChunks-KERNELBASE
	.llong pidhash-KERNELBASE

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl	embedded_sysmap_start
embedded_sysmap_start:
	.llong	0
	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl	embedded_sysmap_end
embedded_sysmap_end:
	.llong	0

	/* Secondary processors spin on this value until it goes to 1. */
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong  0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.      */
	.globl  __secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong  0x0

	. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our linux cpu number */
	mr      r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less  */
	/* than 0x100, so we only need the low order offset.      */
	std     r24,__secondary_hold_acknowledge@l(0)

	/* All secondary CPUs wait here until told to start. */
100:    ld      r4,__secondary_hold_spinloop@l(0)
	cmpdi   0,r4,1
	bne     100b

#ifdef CONFIG_HMT
	b	.hmt_init
#else
#ifdef CONFIG_SMP
	mr      r3,r24
	b       .pseries_secondary_smp_init
#else
	BUG_OPCODE
#endif
#endif
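
/*
 * Sketch of the hold protocol above in C (illustrative only; the
 * master-side release happens elsewhere in the boot path):
 *
 *	// secondary, at __secondary_hold:
 *	__secondary_hold_acknowledge = cpu;	// tell the master we got here
 *	while (__secondary_hold_spinloop != 1)
 *		;				// spin until released
 *	pseries_secondary_smp_init(cpu);
 *
 *	// master, once the kernel is ready for the secondaries:
 *	__secondary_hold_spinloop = 1;
 */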

/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 */
#define EX_SRR0		0
#define EX_SRR1		8
#define EX_R20		16
#define EX_R21		24
#define EX_R22		32
#define EX_R23		40
#define EX_DAR		48
#define EX_DSISR	56
#define EX_CCR		60
#define EX_TRAP		60
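
/*
 * The EX_* offsets above define the layout of the per-processor
 * exception frame.  An equivalent (illustrative) C view:
 *
 *	struct ex_frame {
 *		u64 srr0;	// EX_SRR0
 *		u64 srr1;	// EX_SRR1
 *		u64 r20;	// EX_R20
 *		u64 r21;	// EX_R21
 *		u64 r22;	// EX_R22
 *		u64 r23;	// EX_R23
 *		u64 dar;	// EX_DAR
 *		u32 dsisr;	// EX_DSISR
 *		u32 ccr_trap;	// EX_CCR/EX_TRAP share offset 60
 *	};
 *
 * EX_CCR is only used by the stab/slb miss handlers, which never need
 * the trap number, so reusing the slot is safe.
 */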

#define EXCEPTION_PROLOG_PSERIES(n,label)                                \
	mtspr   SPRG2,r20;              /* use SPRG2 as scratch reg   */ \
	mtspr   SPRG1,r21;              /* save r21                   */ \
	mfspr   r20,SPRG3;              /* get paca virt addr         */ \
	ld      r21,PACAEXCSP(r20);     /* get exception stack ptr    */ \
	addi    r21,r21,EXC_FRAME_SIZE; /* make exception frame       */ \
	std	r22,EX_R22(r21);	/* Save r22 in exc. frame     */ \
	li	r22,n;                  /* Save the ex # in exc. frame*/ \
	stw	r22,EX_TRAP(r21);	/*                            */ \
	std	r23,EX_R23(r21);	/* Save r23 in exc. frame     */ \
	mfspr   r22,SRR0;               /* EA of interrupted instr    */ \
	std	r22,EX_SRR0(r21);	/* Save SRR0 in exc. frame    */ \
	mfspr   r23,SRR1;               /* machine state at interrupt */ \
	std	r23,EX_SRR1(r21);	/* Save SRR1 in exc. frame    */ \
	clrrdi  r22,r20,60;             /* Get 0xc part of the vaddr  */ \
	ori	r22,r22,(label)@l;      /* add in the vaddr offset    */ \
		                        /*   assumes *_common < 16b   */ \
	mfmsr   r23;                                                     \
	rotldi  r23,r23,4;                                               \
	ori     r23,r23,0x32B;          /* Set IR, DR, RI, SF, ISF, HV*/ \
	rotldi  r23,r23,60;             /* for generic handlers       */ \
	mtspr   SRR0,r22;                                                \
	mtspr   SRR1,r23;                                                \
	mfcr    r23;                    /* save CR in r23             */ \
	rfid
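
/*
 * The rotldi/ori/rotldi sequence above ORs bits into both ends of the
 * MSR using only 16-bit immediates.  Illustrative C:
 *
 *	msr = rotl64(msr, 4);	// top nibble now at the bottom
 *	msr |= 0x32B;		// SF, ISF, HV (rotated down) + IR, DR, RI
 *	msr = rotl64(msr, 60);	// rotate back into place
 *
 * i.e. msr |= MSR_SF | MSR_ISF | MSR_HV | MSR_IR | MSR_DR | MSR_RI,
 * so the rfid lands in the relocated handler with translation on.
 */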

/*
 * This is the start of the interrupt handlers for iSeries.
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES(n)	                                      \
	mtspr	SPRG2,r20;		    /* use SPRG2 as scratch reg    */ \
	mtspr   SPRG1,r21;                  /* save r21                    */ \
	mfspr	r20,SPRG3;		    /* get paca                    */ \
	ld      r21,PACAEXCSP(r20);         /* get exception stack ptr     */ \
	addi    r21,r21,EXC_FRAME_SIZE;     /* make exception frame        */ \
	std	r22,EX_R22(r21);	    /* save r22 on exception frame */ \
	li	r22,n;                      /* Save the ex # in exc. frame */ \
	stw	r22,EX_TRAP(r21);	    /*                             */ \
	std	r23,EX_R23(r21);	    /* Save r23 in exc. frame      */ \
	ld      r22,LPPACA+LPPACASRR0(r20); /* Get SRR0 from ItLpPaca      */ \
	std	r22,EX_SRR0(r21);	    /* save SRR0 in exc. frame     */ \
	ld      r23,LPPACA+LPPACASRR1(r20); /* Get SRR1 from ItLpPaca      */ \
	std	r23,EX_SRR1(r21);	    /* save SRR1 in exc. frame     */ \
	mfcr    r23;                        /* save CR in r23              */

/*
 * The common exception prolog is used for all exceptions except a few,
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r20 points to the paca, r21 points to the exception frame,
 * r23 contains the saved CR, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON                                           \
	mfspr	r22,SPRG2;		/* Save r20 in exc. frame      */ \
	std	r22,EX_R20(r21);	                                  \
	mfspr	r22,SPRG1;		/* Save r21 in exc. frame      */ \
	std	r22,EX_R21(r21);	                                  \
	mfspr   r22,DAR;                /* Save DAR in exc. frame      */ \
	std	r22,EX_DAR(r21);	                                  \
	std     r21,PACAEXCSP(r20);     /* update exception stack ptr  */ \
		                        /*   iff no protection flt     */ \
	mfspr	r22,DSISR;		/* Save DSISR in exc. frame    */ \
	stw	r22,EX_DSISR(r21);	                                  \
	ld	r22,EX_SRR1(r21);	/* Get SRR1 from exc. frame    */ \
	andi.   r22,r22,MSR_PR;         /* Set CR for later branch     */ \
	mr      r22,r1;                 /* Save r1                     */ \
	subi    r1,r1,INT_FRAME_SIZE;   /* alloc frame on kernel stack */ \
	beq-    1f;                                                       \
	ld      r1,PACAKSAVE(r20);      /* kernel stack to use         */ \
1:      std     r22,GPR1(r1);           /* save r1 in stackframe       */ \
	std     r22,0(r1);              /* make stack chain pointer    */ \
	std     r23,_CCR(r1);           /* save CR in stackframe       */ \
	ld	r22,EX_R20(r21);	/* move r20 to stackframe      */ \
	std	r22,GPR20(r1);		                                  \
	ld	r23,EX_R21(r21);	/* move r21 to stackframe      */ \
	std	r23,GPR21(r1);		                                  \
	ld	r22,EX_R22(r21);	/* move r22 to stackframe      */ \
	std	r22,GPR22(r1);		                                  \
	ld	r23,EX_R23(r21);	/* move r23 to stackframe      */ \
	std	r23,GPR23(r1);		                                  \
	mflr    r22;                    /* save LR in stackframe       */ \
	std     r22,_LINK(r1);                                            \
	mfctr   r23;                    /* save CTR in stackframe      */ \
	std     r23,_CTR(r1);                                             \
	mfspr   r22,XER;                /* save XER in stackframe      */ \
	std     r22,_XER(r1);                                             \
	ld	r23,EX_DAR(r21);	/* move DAR to stackframe      */ \
	std	r23,_DAR(r1);		                                  \
	lwz     r22,EX_DSISR(r21);	/* move DSISR to stackframe    */ \
	std	r22,_DSISR(r1);		                                  \
	lbz	r22,PACAPROCENABLED(r20);                                 \
	std	r22,SOFTE(r1);		                                  \
	ld	r22,EX_SRR0(r21);	/* get SRR0 from exc. frame    */ \
	ld	r23,EX_SRR1(r21);	/* get SRR1 from exc. frame    */ \
	addi    r21,r21,-EXC_FRAME_SIZE;/* pop off exception frame     */ \
	std     r21,PACAEXCSP(r20);                                       \
	SAVE_GPR(0, r1);                /* save r0 in stackframe       */ \
	SAVE_8GPRS(2, r1);              /* save r2 - r9 in stackframe  */ \
	SAVE_4GPRS(10, r1);             /* save r10 - r13 in stackframe*/ \
	ld      r2,PACATOC(r20);	                                  \
	mr	r13,r20

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r1, r22 (SRR0), and r23 (SRR1).
 */

/*
 * Exception vectors.
 */
#define STD_EXCEPTION_PSERIES(n, label )	\
	. = n;					\
	.globl label##_Pseries;			\
label##_Pseries:				\
	EXCEPTION_PROLOG_PSERIES( n, label##_common )

#define STD_EXCEPTION_ISERIES( n, label )	\
	.globl label##_Iseries;			\
label##_Iseries:				\
	EXCEPTION_PROLOG_ISERIES( n );          \
	b	label##_common

#define MASKABLE_EXCEPTION_ISERIES( n, label )	\
	.globl label##_Iseries;			\
label##_Iseries:				\
	EXCEPTION_PROLOG_ISERIES( n );		\
	lbz	r22,PACAPROFMODE(r20);		\
	cmpi	0,r22,PMC_STATE_DECR_PROFILE;	\
	beq-	label##_Iseries_profile;	\
label##_Iseries_prof_ret:			\
	lbz	r22,PACAPROCENABLED(r20);	\
	cmpi	0,r22,0;			\
	beq-	label##_Iseries_masked;		\
	b	label##_common;			\
label##_Iseries_profile:			\
	std	r24,48(r21);			\
	std	r25,56(r21);			\
	mflr	r24;				\
	bl	do_profile;			\
	mtlr	r24;				\
	ld	r24,48(r21);			\
	ld	r25,56(r21);			\
	b	label##_Iseries_prof_ret

#define STD_EXCEPTION_COMMON( trap, label, hdlr )	\
	.globl label##_common;			\
label##_common:					\
	EXCEPTION_PROLOG_COMMON;		\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	li	r20,0;				\
	li	r6,trap;			\
	bl      .save_remaining_regs;           \
	bl      hdlr;                           \
	b       .ret_from_except

/*
 * Start of pSeries system interrupt routines
 */
	. = 0x100
	.globl __start_interupts
__start_interupts:

	STD_EXCEPTION_PSERIES( 0x100, SystemReset )
	STD_EXCEPTION_PSERIES( 0x200, MachineCheck )
	STD_EXCEPTION_PSERIES( 0x300, DataAccess )
	STD_EXCEPTION_PSERIES( 0x380, DataAccessSLB )
	STD_EXCEPTION_PSERIES( 0x400, InstructionAccess )
	STD_EXCEPTION_PSERIES( 0x480, InstructionAccessSLB )
	STD_EXCEPTION_PSERIES( 0x500, HardwareInterrupt )
	STD_EXCEPTION_PSERIES( 0x600, Alignment )
	STD_EXCEPTION_PSERIES( 0x700, ProgramCheck )
	STD_EXCEPTION_PSERIES( 0x800, FPUnavailable )
	STD_EXCEPTION_PSERIES( 0x900, Decrementer )
	STD_EXCEPTION_PSERIES( 0xa00, Trap_0a )
	STD_EXCEPTION_PSERIES( 0xb00, Trap_0b )
	STD_EXCEPTION_PSERIES( 0xc00, SystemCall )
	STD_EXCEPTION_PSERIES( 0xd00, SingleStep )
	STD_EXCEPTION_PSERIES( 0xe00, Trap_0e )
	. = 0xf00
	b   PerformanceMonitor_Pseries
	STD_EXCEPTION_PSERIES( 0xf20, AltiVecUnavailable )
	. = 0xf90
	.globl PerformanceMonitor_Pseries
PerformanceMonitor_Pseries:
	EXCEPTION_PROLOG_PSERIES( 0xf00, PerformanceMonitor_common )
	STD_EXCEPTION_PSERIES( 0x1300, InstructionBreakpoint )
	STD_EXCEPTION_PSERIES( 0x1700, AltiVecAssist )
	STD_EXCEPTION_PSERIES( 0x1800, ThermalInterrupt )


	/* Space for the naca.  Architected to be located at real address
	 * 0x4000.  Various tools rely on this location being fixed.
	 * The first dword of the naca is required by iSeries LPAR to
	 * point to itVpdAreas.  On pSeries native, this value is not used.
	 */
	. = 0x4000
	.globl __end_interupts
	.globl __start_naca
__end_interupts:
__start_naca:
	.llong itVpdAreas
	.llong 0x0
	.llong 0x0
	.llong paca

	. = 0x5000
	.globl __end_naca
	.globl __start_systemcfg
__end_naca:
__start_systemcfg:
	. = 0x6000
	.globl __end_systemcfg
__end_systemcfg:

	/*
	 * The iSeries LPAR map is at this fixed address
	 * so that the HvReleaseData structure can address
	 * it with a 32-bit offset.
	 *
	 * The VSID values below are dependent on the
	 * VSID generation algorithm.  See include/asm/mmu_context.h.
	 */

	.llong	1		/* # ESIDs to be mapped by hypervisor         */
	.llong	1		/* # memory ranges to be mapped by hypervisor */
	.llong	STAB0_PAGE	/* Page # of segment table within load area   */
	.llong	0		/* Reserved */
	.llong	0		/* Reserved */
	.llong	0		/* Reserved */
	.llong	0		/* Reserved */
	.llong	0		/* Reserved */
	.llong	0x0c00000000	/* ESID to map (Kernel at EA = 0xC000000000000000) */
	.llong	0x06a99b4b14    /* VSID to map (Kernel at VA = 0x6a99b4b140000000) */
	.llong	8192		/* # pages to map (32 MB) */
	.llong	0		/* Offset from start of loadarea to start of map */
	.llong	0x0006a99b4b140000	/* VPN of first page to map */

	. = 0x6100

/***  ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES( 0x200, MachineCheck )
	STD_EXCEPTION_ISERIES( 0x300, DataAccess )
	STD_EXCEPTION_ISERIES( 0x380, DataAccessSLB )
	STD_EXCEPTION_ISERIES( 0x400, InstructionAccess )
	STD_EXCEPTION_ISERIES( 0x480, InstructionAccessSLB )
	MASKABLE_EXCEPTION_ISERIES( 0x500, HardwareInterrupt )
	STD_EXCEPTION_ISERIES( 0x600, Alignment )
	STD_EXCEPTION_ISERIES( 0x700, ProgramCheck )
	STD_EXCEPTION_ISERIES( 0x800, FPUnavailable )
	MASKABLE_EXCEPTION_ISERIES( 0x900, Decrementer )
	STD_EXCEPTION_ISERIES( 0xa00, Trap_0a )
	STD_EXCEPTION_ISERIES( 0xb00, Trap_0b )
	STD_EXCEPTION_ISERIES( 0xc00, SystemCall )
	STD_EXCEPTION_ISERIES( 0xd00, SingleStep )
	STD_EXCEPTION_ISERIES( 0xe00, Trap_0e )
	MASKABLE_EXCEPTION_ISERIES( 0xf00, PerformanceMonitor )

	.globl SystemReset_Iseries
SystemReset_Iseries:
	mfspr	r13,SPRG3		/* Get paca address */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,CTRLF
	li	r5,RUNLATCH		/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOADADDR(r3,current_set)
	sldi	r28,r24,4		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,TASK_UNION_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpi	0,r23,0
	beq	iseries_secondary_smp_loop	/* Loop until told to go */
#ifdef SECONDARY_PROCESSORS
	bne	.__secondary_start		/* We were told to go */
#endif
iseries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
	   which are running on multi-threaded machines. */
	lis	r3,0x8000
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0			/* "yield timed" */
	li	r5,-1			/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
	sc				/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRG3		/* Restore r13 (paca) in case the
					 * hypervisor call clobbered it */
	b	1b			/* If SMP not configured, secondaries
					 * loop forever */

	.globl HardwareInterrupt_Iseries_masked
HardwareInterrupt_Iseries_masked:
	b	maskable_exception_exit

	.globl PerformanceMonitor_Iseries_masked
PerformanceMonitor_Iseries_masked:
	li	r22,1
	stb	r22,PACALPPACA+LPPACAPDCINT(r20)
	b	maskable_exception_exit

	.globl Decrementer_Iseries_masked
Decrementer_Iseries_masked:
	li	r22,1
	stb	r22,PACALPPACA+LPPACADECRINT(r20)
	lwz	r22,PACADEFAULTDECR(r20)
	mtspr	DEC,r22
maskable_exception_exit:
	mtcrf	0xff,r23		/* Restore regs and free exception frame */
	ld	r22,EX_SRR0(r21)
	ld	r23,EX_SRR1(r21)
	mtspr	SRR0,r22
	mtspr	SRR1,r23
	ld	r22,EX_R22(r21)
	ld	r23,EX_R23(r21)
	mfspr	r21,SPRG1
	mfspr	r20,SPRG2
	rfid

/*
 * Data area reserved for FWNMI option.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

/*
 * Vectors for the FWNMI option.  Share common code.
 */
	. = 0x8000
	.globl SystemReset_FWNMI
SystemReset_FWNMI:
	EXCEPTION_PROLOG_PSERIES(0x100, SystemReset_common)
	.globl MachineCheck_FWNMI
MachineCheck_FWNMI:
	EXCEPTION_PROLOG_PSERIES(0x200, MachineCheck_common)

	/*
	 * Space for the initial segment table.
	 * For LPAR, the hypervisor must fill in at least one entry
	 * before we get control (with relocate on).
	 */
	. = STAB0_PHYS_ADDR
	.globl __start_stab
__start_stab:

	. = (STAB0_PHYS_ADDR + PAGE_SIZE)
	.globl __end_stab
__end_stab:


/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON( 0x100, SystemReset, .SystemResetException )
	STD_EXCEPTION_COMMON( 0x200, MachineCheck, .MachineCheckException )
	STD_EXCEPTION_COMMON( 0x900, Decrementer, .timer_interrupt )
	STD_EXCEPTION_COMMON( 0xa00, Trap_0a, .UnknownException )
	STD_EXCEPTION_COMMON( 0xb00, Trap_0b, .UnknownException )
	STD_EXCEPTION_COMMON( 0xd00, SingleStep, .SingleStepException )
	STD_EXCEPTION_COMMON( 0xe00, Trap_0e, .UnknownException )

	.globl AltiVecUnavailable_common
AltiVecUnavailable_common:
	EXCEPTION_PROLOG_COMMON
#ifdef CONFIG_ALTIVEC
	bne	.load_up_altivec	/* if from user, just load it up */
#endif
	addi	r3,r1,STACK_FRAME_OVERHEAD
	DO_COPY_EE()
	li	r6,0xf20
	bl      .save_remaining_regs
#ifndef CONFIG_ALTIVEC
	beq    1f
	bl     .IllegalAltiVecInstruction
	b      .ret_from_except
1:
#endif
	bl      .KernelAltiVecUnavailableException
	BUG_OPCODE

	.global AltiVecAssist_common
AltiVecAssist_common:
	EXCEPTION_PROLOG_COMMON
	addi	r3,r1,STACK_FRAME_OVERHEAD
	DO_COPY_EE()
	li	r6,0x1700
	bl	.save_remaining_regs
	bl	.AltiVecAssistException
	b	.ret_from_except

	.global ThermalInterrupt_common
ThermalInterrupt_common:
	EXCEPTION_PROLOG_COMMON
	addi	r3,r1,STACK_FRAME_OVERHEAD
	DO_COPY_EE()
	li	r6,0x1800
	bl      .save_remaining_regs
	bl	.ThermalInterrupt
	BUG_OPCODE


	STD_EXCEPTION_COMMON(0x1300, InstructionBreakpoint, .InstructionBreakpointException )

/*
 * Return from an exception which is handled without calling
 * save_remaining_regs.  The caller is assumed to have done
 * EXCEPTION_PROLOG_COMMON.
 */
fast_exception_return:
	ld      r3,_CCR(r1)
	ld      r4,_LINK(r1)
	ld      r5,_CTR(r1)
	ld      r6,_XER(r1)
	mtcr    r3
	mtlr    r4
	mtctr   r5
	mtspr   XER,r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)
	REST_4GPRS(10, r1)

	mfmsr	r20
	li	r21, MSR_RI
	andc	r20,r20,r21
	mtmsrd	r20,1

	mtspr   SRR1,r23
	mtspr   SRR0,r22
	REST_4GPRS(20, r1)
	ld      r1,GPR1(r1)
	rfid

/*
 * Here r20 points to the PACA, r21 to the exception frame,
 * r23 contains the saved CR.
 * r20 - r23, SRR0 and SRR1 are saved in the exception frame.
 */
	.globl DataAccess_common
DataAccess_common:
	mfspr   r22,DSISR
	andis.  r22,r22,0x0020
	beq	1f
	mfspr   r22,DAR			/* if it's a segment table miss, */
	srdi    r22,r22,60		/* check if it is in a kernel region */
	cmpi    0,r22,0xc		/* and call do_stab_bolted if so */
	beq     .do_stab_bolted
	cmpi    0,r22,0xb
	beq     .do_stab_bolted

1:
	EXCEPTION_PROLOG_COMMON
	ld      r3,_DSISR(r1)
	andis.	r0,r3,0xa450		/* weird error? */
	bne	1f			/* if not, try to put a PTE */
	andis.	r0,r3,0x0020		/* Is it a page table fault? */
	rlwinm	r4,r3,32-23,29,29	/* DSISR_STORE -> _PAGE_RW */
	ld      r3,_DAR(r1)             /* into the hash table */

	beq	2f			/* If so handle it */
	li	r4,0x300                /* Trap number */
	bl	.do_stab_SI
	b	1f

2:	li	r5,0x300
	bl	.do_hash_page_DSI	/* Try to handle as hpte fault */
1:
	ld      r4,_DAR(r1)
	ld      r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	DO_COPY_EE()
	li	r6,0x300
	bl      .save_remaining_regs
	bl      .do_page_fault
	b       .ret_from_except
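
/*
 * The DataAccess path above implements, roughly, this triage
 * (illustrative C; the DSISR masks are the ones tested above):
 *
 *	if ((dsisr & 0x00200000) && kernel_region(dar))
 *		do_stab_bolted();	// segment miss on region 0xb/0xc
 *	else if (dsisr & 0xa4500000)
 *		do_page_fault();	// "weird" errors go straight out
 *	else if (dsisr & 0x00200000)
 *		do_stab_SI();		// user segment miss
 *	else
 *		do_hash_page_DSI();	// normal HPTE refill
 *
 * with do_page_fault() as the fallback whenever a fast path fails.
 */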

	.globl DataAccessSLB_common
DataAccessSLB_common:
	mfspr   r22,DAR
	srdi    r22,r22,60
	cmpi    0,r22,0xc
	beq     .do_slb_bolted
	cmpi    0,r22,0xb
	beq     .do_slb_bolted

	EXCEPTION_PROLOG_COMMON
	ld      r3,_DAR(r1)
	li      r4,0x380                /* Exception vector  */
	bl	.ste_allocate
	or.	r3,r3,r3		/* Check return code */
	beq     fast_exception_return   /* Return if we succeeded */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	DO_COPY_EE()
	li	r6,0x380
	li	r5,0
	bl      .save_remaining_regs
	bl      .do_page_fault
	b       .ret_from_except

	.globl InstructionAccess_common
InstructionAccess_common:
	EXCEPTION_PROLOG_COMMON

	andis.	r0,r23,0x0020		/* no ste found? */
	beq	2f
	mr	r3,r22			/* SRR0 at interrupt */
	li	r4,0x400		/* Trap number       */
	bl	.do_stab_SI
	b	1f

2:	andis.	r0,r23,0x4000		/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	mr	r3,r22			/* into the hash table */
	bl	.do_hash_page_ISI	/* Try to handle as hpte fault */
1:
	mr	r4,r22
	rlwinm	r5,r23,0,4,4		/* We only care about PR in error_code */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	DO_COPY_EE()
	li	r6,0x400
	bl      .save_remaining_regs
	bl      .do_page_fault
	b       .ret_from_except

	.globl InstructionAccessSLB_common
InstructionAccessSLB_common:
	EXCEPTION_PROLOG_COMMON
	mr      r3,r22                  /* SRR0 = NIA        */
	li	r4,0x480                /* Exception vector  */
	bl	.ste_allocate
	or.	r3,r3,r3		/* Check return code */
	beq     fast_exception_return   /* Return if we succeeded */

	addi	r3,r1,STACK_FRAME_OVERHEAD
	DO_COPY_EE()
	li	r6,0x480
	li	r5,0
	bl      .save_remaining_regs
	bl      .do_page_fault
	b       .ret_from_except

	.globl HardwareInterrupt_common
HardwareInterrupt_common:
	EXCEPTION_PROLOG_COMMON
HardwareInterrupt_entry:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r20,0
	li	r6,0x500
	bl      .save_remaining_regs
	/* Determine whether do_IRQ should run on the hardware interrupt */
	/* stack.  The outermost invocation (hardware interrupt count 0) */
	/* switches to the interrupt stack hung off this processor's     */
	/* PACA; nested invocations stay on the stack they arrive on.    */
	/*                                                                */
	/* The call to do_IRQ preserves the values of r14 - r31.         */
	/*                                                                */
	mfspr	r20,SPRG3 		    /* get paca                   */
	lbz     r21,PACAHRDWINTCOUNT(r20)   /* get hardware interrupt cnt */
	cmpi    0,r21,0                     /*                            */
	addi    r21,r21,1                   /* incr hardware interrupt cnt*/
	stb     r21,PACAHRDWINTCOUNT(r20)   /*                            */
	bne     2f                          /*                            */

	mr      r14,r1                      /* preserve current r1        */
	ld      r1,PACAHRDWINTSTACK(r20)    /*                            */
	std     r14,0(r1)                   /* set the back chain         */
	bl      .do_IRQ
	lbz     r22,PACAHRDWINTCOUNT(r20)   /* get hardware interrupt cnt */
	cmp     0,r22,r21                   /* debug test                 */
	bne     3f
	subi    r21,r21,1
	stb     r21,PACAHRDWINTCOUNT(r20)   /*                            */
	mr      r1,r14                      /*                            */
	b       .ret_from_except

2:
	bl      .do_IRQ

	lbz     r22,PACAHRDWINTCOUNT(r20)   /* get hardware interrupt cnt */
	cmp     0,r22,r21                   /* debug test                 */
	bne     3f                          /*                            */
	subi    r21,r21,1                   /* decr hardware interrupt cnt*/
	stb     r21,PACAHRDWINTCOUNT(r20)   /*                            */

	b       .ret_from_except

3:
	/* error - counts out of sync                                      */
#ifdef CONFIG_XMON
	bl	.xmon
#endif
#ifdef CONFIG_KDB
	/*	kdb(KDB_REASON_FAULT,regs->trap,regs); */
	li      r3,1
	li	r4,0x200
	li      r5,0
	bl	.kdb
#endif
4:	b	4b


	.globl Alignment_common
Alignment_common:
	EXCEPTION_PROLOG_COMMON
	addi	r3,r1,STACK_FRAME_OVERHEAD
	DO_COPY_EE()
	li	r6,0x600
	bl      .save_remaining_regs
	bl      .AlignmentException
	b       .ret_from_except

	.globl ProgramCheck_common
ProgramCheck_common:
	EXCEPTION_PROLOG_COMMON
	addi	r3,r1,STACK_FRAME_OVERHEAD
	DO_COPY_EE()
	li	r6,0x700
	bl      .save_remaining_regs
	bl      .ProgramCheckException
	b       .ret_from_except

	.globl FPUnavailable_common
FPUnavailable_common:
	EXCEPTION_PROLOG_COMMON
	bne	.load_up_fpu		/* if from user, just load it up */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	DO_COPY_EE()
	li	r6,0x800
	bl      .save_remaining_regs
	bl      .KernelFPUnavailableException
	BUG_OPCODE

	.globl SystemCall_common
SystemCall_common:
	EXCEPTION_PROLOG_COMMON
#ifdef CONFIG_PPC_ISERIES
	cmpi	0,r0,0x5555		/* Special syscall to handle pending */
	bne+	1f			/* interrupts */
	andi.	r6,r23,MSR_PR		/* Only allowed from kernel */
	beq+	HardwareInterrupt_entry
1:
#endif
	std	r3,ORIG_GPR3(r1)
	DO_COPY_EE()
	li	r6,0xc00
	bl      .save_remaining_regs
	bl      .DoSyscall
	b       .ret_from_except

	.globl PerformanceMonitor_common
PerformanceMonitor_common:
	EXCEPTION_PROLOG_COMMON
	bl	.PerformanceMonitorException
	b	fast_exception_return

_GLOBAL(PerformanceMonitorException)
	sync
	mfspr	r7,SPRG3
	lbz	r8,PACAPROFMODE(r7)
	cmpi	0,r8,PMC_STATE_PROFILE_KERN
	beq	5f
	cmpi	0,r8,PMC_STATE_TRACE_KERN
	beq	6f
	cmpi	0,r8,PMC_STATE_TRACE_USER
	beq	9f
	cmpi	0,r8,PMC_STATE_TIMESLICE
	beq	10f
	blr

	/* PMC Profile Kernel */
5:	mfspr   r9,SIAR
	srdi    r8,r9,60
	cmpi    0,r8,0xc
	beq     3f
	li	r9,0xc
	sldi	r9,r9,60
3:      ld	r8,PACAPROFSTEXT(r7)	/* _stext */
	subf	r9,r8,r9		/* offset into kernel */
	lwz	r8,PACAPROFSHIFT(r7)
	srd	r9,r9,r8
	lwz	r8,PACAPROFLEN(r7)	/* length of profile table (-1) */
	srdi	r8,r8,2
	cmpd	r9,r8		/* off end? */
	ble	1f
	mr	r9,r8			/* force into last entry */
	srdi	r9,r9,2
1:	sldi	r9,r9,2		/* convert to offset into buffer */
	ld	r8,PACAPROFBUFFER(r7)	/* profile buffer */
	add	r8,r8,r9
2:	lwarx	r9,0,r8		/* atomically increment */
	addi	r9,r9,1
	stwcx.	r9,0,r8
	bne-	2b
	addi	r10,r7,PACAPMC1
	addi	r7,r7,PACAPMCC1
	b	7f

	/* PMC Trace Kernel */
6:	LOADADDR(r11, perfmon_base)
	addi	r8,r11,32
	ld	r12,24(r11)
	subi	r12,r12,1
8:	ldarx	r10,0,r8
	addi	r9,r10,16
	and	r9,r9,r12
	stdcx.	r9,0,r8
	bne-	8b
	ld	r9,16(r11)	/* profile buffer */
	add	r8,r9,r10
	mfspr   r9,SIAR
	std	r9,0(r8)
	mfspr   r9,SDAR
	std	r9,8(r8)
	addi	r10,r7,PACAPMC1
	addi	r7,r7,PACAPMCC1
	b	7f

	/* PMC Trace User */
9:	LOADADDR(r11, perfmon_base)
#if 0
	addi	r8,r11,32
	ld	r12,24(r11)
	subi	r12,r12,1
8:	ldarx	r10,0,r8
	addi	r9,r10,16
	and	r9,r9,r12
	stdcx.	r9,0,r8
	bne-	8b
	ld	r9,16(r11)	/* profile buffer */
	add	r8,r9,r10
	mfspr   r9,SIAR
	std	r9,0(r8)
	mfspr   r9,SDAR
	std	r9,8(r8)
	addi	r10,r13,THREAD+THREAD_PMC1
	addi	r7,r13,THREAD+THREAD_PMCC1
#endif
	b	7f

	/* PMC Timeslice */
10:	addi	r10,r7,PACAPMC1
	addi	r7,r7,PACAPMCC1
	b	7f

	/* Accumulate counter values for kernel traces */
7:	ld	r9,0(r7)
	mfspr   r8,PMC1
	add	r9,r9,r8
	std	r9,0(r7)
	ld	r9,8(r7)
	mfspr	r8,PMC2
	add	r9,r9,r8
	std	r9,8(r7)
	ld	r9,16(r7)
	mfspr	r8,PMC3
	add	r9,r9,r8
	std	r9,16(r7)
	ld	r9,24(r7)
	mfspr	r8,PMC4
	add	r9,r9,r8
	std	r9,24(r7)
	ld	r9,32(r7)
	mfspr	r8,PMC5
	add	r9,r9,r8
	std	r9,32(r7)
	ld	r9,40(r7)
	mfspr	r8,PMC6
	add	r9,r9,r8
	std	r9,40(r7)
	ld	r9,48(r7)
	mfspr	r8,PMC7
	add	r9,r9,r8
	std	r9,48(r7)
	ld	r9,56(r7)
	mfspr	r8,PMC8
	add	r9,r9,r8
	std	r9,56(r7)

	/* Reset all counters for kernel traces */
	ld	r9,0(r10)
	mtspr	PMC1,r9
	ld	r9,8(r10)
	mtspr	PMC2,r9
	ld	r9,16(r10)
	mtspr	PMC3,r9
	ld	r9,24(r10)
	mtspr	PMC4,r9
	ld	r9,32(r10)
	mtspr	PMC5,r9
	ld	r9,40(r10)
	mtspr	PMC6,r9
	ld	r9,48(r10)
	mtspr	PMC7,r9
	ld	r9,56(r10)
	mtspr	PMC8,r9
	ld	r9,64(r10)
	mtspr	MMCR0,r9
	ld	r9,72(r10)
	mtspr	MMCR1,r9
	ld	r9,80(r10)
	mtspr	MMCRA,r9
	blr

_GLOBAL(do_hash_page_ISI)
	li	r4,0
_GLOBAL(do_hash_page_DSI)
	rlwimi	r4,r23,32-13,30,30	/* Insert MSR_PR as _PAGE_USER */
	ori	r4,r4,1			/* add _PAGE_PRESENT */

	mflr	r21			/* Save LR in r21 */

#ifdef DO_SOFT_DISABLE
	/*
	 * We hard enable here (but first soft disable) so that the hash_page
	 * code can spin on the hash_table_lock without problem on a shared
	 * processor.
	 */
	li	r0,0
	stb	r0,PACAPROCENABLED(r20)	/* Soft Disabled */

	mfmsr	r0
	ori	r0,r0,MSR_EE+MSR_RI
	mtmsrd	r0			/* Hard Enable, RI on */
#endif

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */

	bl	.hash_page		/* build HPTE if possible */

#ifdef DO_SOFT_DISABLE
	/*
	 * Now go back to hard disabled.
	 */
	mfmsr	r0
	li	r4,0
	ori	r4,r4,MSR_EE+MSR_RI
	andc	r0,r0,r4
	mtmsrd	r0			/* Hard Disable, RI off */

	ld	r0,SOFTE(r1)
	cmpdi	0,r0,0			/* See if we will soft enable in */
					/* save_remaining_regs */
	beq	5f
	CHECKANYINT(r4,r5)
	bne-	HardwareInterrupt_entry	/* Convert this DSI into an External */
					/* to process interrupts which occurred */
					/* during hash_page */
5:
	stb	r0,PACAPROCENABLED(r20)	/* Restore soft enable/disable status */
#endif
	or.	r3,r3,r3		/* Check return code */
	beq     fast_exception_return   /* Return from exception on success */

	mtlr    r21                     /* restore LR */
	blr                             /* Return to DSI or ISI on failure */

/*
 * r20 points to the PACA, r21 to the exception frame,
 * r23 contains the saved CR.
 * r20 - r23, SRR0 and SRR1 are saved in the exception frame.
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(do_stab_bolted)
	stw	r23,EX_CCR(r21)	/* save CR in exc. frame */

	/* (((ea >> 28) & 0xffffffff) << 15) | (ea >> 60) */
	mfspr	r21,DAR
	rldicl  r20,r21,36,32   /* Permits a full 32b of ESID */
	rldicr  r20,r20,15,48
	rldicl  r21,r21,4,60
	or      r20,r20,r21

	li      r21,9           /* VSID_RANDOMIZER */
	sldi    r21,r21,32
	oris    r21,r21,58231
	ori     r21,r21,39831

	mulld   r20,r20,r21
	clrldi  r20,r20,28      /* r20 = vsid */
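
	/*
	 * Illustrative C equivalent of the VSID computation above:
	 *
	 *	u64 proto = (((ea >> 28) & 0xffffffffUL) << 15) | (ea >> 60);
	 *	u64 vsid  = (proto * 0x9E3779B97UL) & ((1UL << 36) - 1);
	 *
	 * 0x9E3779B97 is the VSID_RANDOMIZER built by li/sldi/oris/ori;
	 * the multiply scatters consecutive ESIDs across the VSID space.
	 */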

	mfsprg  r21,3
	ld      r21,PACASTABVIRT(r21)

	/* Hash to the primary group */
	mfspr	r22,DAR
	rldicl  r22,r22,36,59
	rldicr  r22,r22,7,56
	or      r21,r21,r22     /* r21 = first ste of the group */

	/* Search the primary group for a free entry */
	li      r22,0
1:
	ld      r23,0(r21)      /* Test valid bit of the current ste   */
	rldicl  r23,r23,57,63
	cmpwi   r23,0
	bne     2f
	ld      r23,8(r21)      /* Get the current vsid part of the ste */
	rldimi  r23,r20,12,0    /* Insert the new vsid value            */
	std     r23,8(r21)      /* Put new entry back into the stab     */
	eieio                   /* Order vsid update                    */
	ld      r23,0(r21)      /* Get the esid part of the ste         */
	mfspr	r20,DAR         /* Get the new esid                     */
	rldicl  r20,r20,36,28   /* Permits a full 36b of ESID           */
	rldimi  r23,r20,28,0    /* Insert the new esid value            */
	ori     r23,r23,144     /* Turn on valid and kp                 */
	std     r23,0(r21)      /* Put new entry back into the stab     */
	sync                    /* Order the update                     */
	b       3f
2:
	addi    r22,r22,1
	addi    r21,r21,16
	cmpldi  r22,7
	ble     1b

	/* Stick to searching only the primary group, for now.          */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number; OR in 1 to avoid entry 0      */
	mftb    r22
	andi.   r22,r22,7
	ori	r22,r22,1
	sldi	r22,r22,4

	/* r21 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry                  */
	subi	r21,r21,128
	or	r21,r21,r22      /* r21 is the entry to invalidate        */

	isync                    /* mark the entry invalid                */
	ld      r23,0(r21)
	li      r22,-129
	and     r23,r23,r22
	std     r23,0(r21)
	sync

	ld      r23,8(r21)
	rldimi  r23,r20,12,0
	std     r23,8(r21)
	eieio

	ld      r23,0(r21)      /* Get the esid part of the ste         */
	mr      r22,r23
	mfspr	r20,DAR         /* Get the new esid                     */
	rldicl  r20,r20,36,28   /* Permits a full 36b of ESID           */
	rldimi  r23,r20,28,0    /* Insert the new esid value            */
	ori     r23,r23,144     /* Turn on valid and kp                 */
	std     r23,0(r21)      /* Put new entry back into the stab     */

	rldicl  r22,r22,36,28
	rldicr  r22,r22,28,35
	slbie   r22
	sync

3:
	/* All done -- return from exception. */
	mfsprg  r20,3                   /* Load the PACA pointer  */
	ld      r21,PACAEXCSP(r20)      /* Get the exception frame pointer */
	addi    r21,r21,EXC_FRAME_SIZE
	lwz	r23,EX_CCR(r21)		/* get saved CR */
	/* note that this is almost identical to maskable_exception_exit */
	mtcr    r23                     /* restore CR */

	mfmsr	r22
	li	r23, MSR_RI
	andc	r22,r22,r23
	mtmsrd	r22,1

	ld	r22,EX_SRR0(r21)	/* Get SRR0 from exc. frame */
	ld	r23,EX_SRR1(r21)	/* Get SRR1 from exc. frame */
	mtspr	SRR0,r22
	mtspr   SRR1,r23
	ld	r22,EX_R22(r21)		/* restore r22 and r23 */
	ld	r23,EX_R23(r21)
	mfspr	r20,SPRG2
	mfspr	r21,SPRG1
	rfid
_TRACEBACK(do_stab_bolted)

/*
 * r20 points to the PACA, r21 to the exception frame,
 * r23 contains the saved CR.
 * r20 - r23, SRR0 and SRR1 are saved in the exception frame.
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(do_slb_bolted)
	stw     r23,EX_CCR(r21) /* save CR in exc. frame */

	/* (((ea >> 28) & 0xffffffff) << 15) | (ea >> 60) */
	mfspr	r21,DAR
	rldicl  r20,r21,36,32   /* Permits a full 32b of ESID */
	rldicr  r20,r20,15,48
	rldicl  r21,r21,4,60
	or      r20,r20,r21

	li      r21,9           /* VSID_RANDOMIZER */
	sldi    r21,r21,32
	oris    r21,r21,58231
	ori     r21,r21,39831

	mulld   r20,r20,r21
	clrldi  r20,r20,28      /* r20 = vsid */

	/* Search the SLB for a free entry */
	li      r22,1
1:
	slbmfee	r23,r22
	rldicl  r23,r23,37,63
	cmpwi   r23,0
	beq     4f              /* Found an invalid entry              */

	addi	r22,r22,1
	cmpldi	r22,64
	blt	1b

	/* No free entry - just take the next entry, round-robin */
	/* XXX we should get the number of SLB entries from the naca */
SLB_NUM_ENTRIES = 64
2:	mfspr	r21,SPRG3
	ld	r22,PACASTABRR(r21)
	addi	r23,r22,1
	cmpdi	r23,SLB_NUM_ENTRIES
	blt	3f
	li	r23,1
3:	std	r23,PACASTABRR(r21)

	/* r20 = vsid, r22 = entry */

	/*
	 * Never cast out the segment for our kernel stack. Since we
	 * don't invalidate the ERAT we could have a valid translation
	 * for the kernel stack during the first part of exception exit
	 * which gets invalidated due to a tlbie from another cpu at a
	 * non recoverable point (after setting srr0/1) - Anton
	 */
	slbmfee	r23,r22
	srdi	r23,r23,28
	/*
	 * This is incorrect (r1 is not the kernel stack) if we entered
	 * from userspace but there is no critical window from userspace
	 * so this should be OK. Also if we cast out the userspace stack
	 * segment while in userspace we will fault it straight back in.
	 */
	srdi	r21,r1,28
	cmpd	r21,r23
	beq-	2b

4:
	/* Put together the vsid portion of the entry. */
	li      r21,0
	rldimi  r21,r20,12,0
	ori     r20,r21,1024
#ifndef CONFIG_PPC_ISERIES
	ori	r20,r20,256    /* map kernel region with large ptes */
#endif

	/* Invalidate the old entry */
	slbmfee	r21,r22
	lis	r23,-2049
	ori	r23,r23,65535
	and	r21,r21,r23
	slbie	r21

	/* Put together the esid portion of the entry. */
	mfspr	r21,DAR        /* Get the new esid                     */
	rldicl  r21,r21,36,28  /* Permits a full 36b of ESID           */
	li      r23,0
	rldimi  r23,r21,28,0   /* Insert esid  */
	oris    r21,r23,2048   /* valid bit    */
	rldimi  r21,r22,0,52   /* Insert entry */

	isync
	slbmte  r20,r21
	isync
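
	/*
	 * Illustrative sketch of the slbmte operands assembled above
	 * (flag values as used in this file; bit names per the SLB
	 * architecture):
	 *
	 *	rs (VSID dword) = (vsid << 12) | 0x400	// Kp
	 *	                | (large ? 0x100 : 0);	// L, non-iSeries only
	 *	rb (ESID dword) = (esid << 28)
	 *	                | 0x8000000		// V: oris ...,2048
	 *	                | slb_index;		// rldimi ...,r22,0,52
	 */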

	/* All done -- return from exception. */
	mfsprg  r20,3                   /* Load the PACA pointer  */
	ld      r21,PACAEXCSP(r20)      /* Get the exception frame pointer */
	addi    r21,r21,EXC_FRAME_SIZE
	lwz	r23,EX_CCR(r21)		/* get saved CR */
	/* note that this is almost identical to maskable_exception_exit */
	mtcr    r23                     /* restore CR */

	mfmsr	r22
	li	r23, MSR_RI
	andc	r22,r22,r23
	mtmsrd	r22,1

	ld	r22,EX_SRR0(r21)	/* Get SRR0 from exc. frame */
	ld	r23,EX_SRR1(r21)	/* Get SRR1 from exc. frame */
	mtspr	SRR0,r22
	mtspr   SRR1,r23
	ld	r22,EX_R22(r21)		/* restore r22 and r23 */
	ld	r23,EX_R23(r21)
	mfspr	r20,SPRG2
	mfspr	r21,SPRG1
	rfid
_TRACEBACK(do_slb_bolted)

_GLOBAL(do_stab_SI)
	mflr	r21			/* Save LR in r21 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 *
	 * at return r3 = 0 for success
	 */

	bl	.ste_allocate		/* build STE if possible */
	or.	r3,r3,r3		/* Check return code */
	beq     fast_exception_return   /* Return from exception on success */
	mtlr    r21                     /* restore LR */
	blr                             /* Return to DSI or ISI on failure */

/*
 * This code finishes saving the registers to the exception frame.
 * Address translation is already on.
 */
_GLOBAL(save_remaining_regs)
	/*
	 * Save the rest of the registers into the pt_regs structure
	 */
	std     r22,_NIP(r1)
	std     r23,_MSR(r1)
	std     r6,TRAP(r1)
	ld      r6,GPR6(r1)
	SAVE_2GPRS(14, r1)
	SAVE_4GPRS(16, r1)
	SAVE_8GPRS(24, r1)

	/*
	 * Clear the RESULT field
	 */
	li	r22,0
	std	r22,RESULT(r1)

	/*
	 * Test if from user state; result will be tested later
	 */
	andi.	r23,r23,MSR_PR		/* Set CR for later branch */

	/*
	 * Indicate that r1 contains the kernel stack and
	 * get the Kernel TOC and CURRENT pointers from the paca
	 */
	std	r22,PACAKSAVE(r13)	/* r1 is now kernel sp */
	ld	r2,PACATOC(r13)		/* Get Kernel TOC pointer */

	/*
	 * If from user state, update THREAD.regs
	 */
	beq	2f			/* Modify THREAD.regs if from user */
	addi	r24,r1,STACK_FRAME_OVERHEAD
	ld	r22,PACACURRENT(r13)
	std	r24,THREAD+PT_REGS(r22)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	mfspr	r24,SPRN_VRSAVE		/* save the vrsave register value */
	std	r24,THREAD+THREAD_VRSAVE(r22)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
2:
	SET_REG_TO_CONST(r22, MSR_KERNEL)

#ifdef DO_SOFT_DISABLE
	stb	r20,PACAPROCENABLED(r13) /* possibly soft enable */
	ori	r22,r22,MSR_EE		/* always hard enable */
#else
	rldimi	r22,r20,15,48		/* Insert desired EE value */
#endif

	mtmsrd  r22
	blr

/*
 * Kernel profiling with soft disable on iSeries
 */
do_profile:
	ld	r22,8(r21)		/* Get SRR1 */
	andi.	r22,r22,MSR_PR		/* Test if in kernel */
	bnelr				/* return if not in kernel */
	ld	r22,0(r21)		/* Get SRR0 */
	ld	r25,PACAPROFSTEXT(r20)	/* _stext */
	subf	r22,r25,r22		/* offset into kernel */
	lwz	r25,PACAPROFSHIFT(r20)
	srd	r22,r22,r25
	lwz	r25,PACAPROFLEN(r20)	/* length of profile table (-1) */
	cmp	0,r22,r25		/* off end? */
	ble	1f
	mr	r22,r25			/* force into last entry */
1:	sldi	r22,r22,2		/* convert to offset into buffer */
	ld	r25,PACAPROFBUFFER(r20)	/* profile buffer */
	add	r25,r25,r22
2:	lwarx	r22,0,r25		/* atomically increment */
	addi	r22,r22,1
	stwcx.	r22,0,r25
	bne-	2b
	blr
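
/*
 * Illustrative C equivalent of the bucket update in do_profile (the
 * PMC_STATE_PROFILE_KERN case of PerformanceMonitorException does the
 * same thing); the field names here are descriptive stand-ins for the
 * PACAPROF* offsets:
 *
 *	idx = (srr0 - paca->prof_stext) >> paca->prof_shift;
 *	if (idx > paca->prof_len)
 *		idx = paca->prof_len;		// clamp to the last entry
 *	atomic_inc(&paca->prof_buffer[idx]);	// the lwarx/stwcx. loop
 */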


/*
 * On pSeries, secondary processors spin in the following code.
 * At entry, r3 = this processor's number (in Linux terms, not hardware).
 */
_GLOBAL(pseries_secondary_smp_init)

	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Set up a paca value for this processor. */
	LOADADDR(r24, paca)		 /* Get base vaddr of Paca array  */
	mulli	r13,r3,PACA_SIZE	 /* Calculate vaddr of right Paca */
	add	r13,r13,r24              /* for this processor.           */

	mtspr	SPRG3,r13		 /* Save vaddr of Paca in SPRG3   */
	mr	r24,r3			 /* __secondary_start needs cpu#  */

1:
	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	 /* Test if this processor should */
					 /* start.                        */
	sync

	/* Create a temp kernel stack for use before relocation is on. */
	mr	r1,r13
	addi	r1,r1,PACAGUARD
	addi	r1,r1,0x1000
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpi	0,r23,0
#ifdef CONFIG_SMP
#ifdef SECONDARY_PROCESSORS
	bne	.__secondary_start
#endif
#endif
	b	1b			 /* Loop until told to go         */
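
/*
 * The paca setup above amounts to the following (illustrative C;
 * 'paca' is the per-processor array defined elsewhere):
 *
 *	struct paca_struct *p = &paca[cpu];	// base + cpu * PACA_SIZE
 *	mtspr(SPRG3, p);			// per-cpu anchor register
 *
 * after which any handler can recover its paca via mfspr(SPRG3).
 */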

_GLOBAL(__start_initialization_iSeries)

	LOADADDR(r1,init_task_union)
	addi	r1,r1,TASK_UNION_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	LOADADDR(r3,cpu_specs)
	LOADADDR(r4,cur_cpu_spec)
	li	r5,0
	bl	.identify_cpu

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	LOADADDR(r9,systemcfg)
	SET_REG_TO_CONST(r4, KERNELBASE+0x5000)
	std	r4,0(r9)		/* set the systemcfg pointer */

	LOADADDR(r9,naca)
	SET_REG_TO_CONST(r4, KERNELBASE+0x4000)
	std	r4,0(r9)		/* set the naca pointer */

#if 1 /* DRENG:PPPBBB:TIA This looks like dead code to me -Peter */
	/* Get the pointer to the segment table */
	ld	r6,PACA(r4)             /* Get the base paca pointer       */
	ld	r4,PACASTABVIRT(r6)
#endif

	bl      .iSeries_fixup_klimit

	b	.start_here_common

_GLOBAL(__start_initialization_pSeries)
	mr	r31,r3			/* save parameters */
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	mr	r26,r8                  /* YABOOT: debug_print() routine */
	mr	r25,r9                  /* YABOOT: debug_delay() routine */
	mr	r24,r10                 /* YABOOT: debug_prom() routine */

	bl	.enable_64b_mode

	/* put a relocation offset into r3 */
	bl	.reloc_offset

	LOADADDR(r2,__toc_start)
	addi    r2,r2,0x4000
	addi    r2,r2,0x4000

	/* Relocate the TOC from a virt addr to a real addr */
	sub	r2,r2,r3

	/* setup the systemcfg pointer which is needed by prom_init       */
	LOADADDR(r9,systemcfg)
	sub	r9,r9,r3                /* addr of the variable systemcfg */
	SET_REG_TO_CONST(r4, KERNELBASE+0x5000)
	sub	r4,r4,r3
	std	r4,0(r9)		/* set the value of systemcfg     */

	/* setup the naca pointer which is needed by prom_init            */
	LOADADDR(r9,naca)
	sub	r9,r9,r3                /* addr of the variable naca      */
	SET_REG_TO_CONST(r4, KERNELBASE+0x4000)
	sub	r4,r4,r3
	std	r4,0(r9)		/* set the value of naca          */

	/* DRENG / PPPBBB Fix the following comment!!! -Peter */
	/* The following copies the first 0x100 bytes of code from the    */
	/* load addr to physical addr 0x0.  This code causes secondary    */
	/* processors to spin until a flag in the PACA is set.  This      */
	/* is done at this time rather than with the entire kernel        */
	/* relocation which is done below because we need to cause the    */
	/* processors to spin on code that is not going to move while OF  */
	/* is still alive. Although the spin code is not actually run on  */
	/* a uniprocessor, we always do this copy.                        */
	SET_REG_TO_CONST(r4, KERNELBASE)/* Src addr                       */
	sub	r4,r4,r3  		/* current address of __start     */
			                /*        the source addr         */
	li	r3,0                    /* Dest addr                      */
	li	r5,0x100 		/* # bytes of memory to copy      */
	li	r6,0			/* Destination offset             */
	bl	.copy_and_flush		/* copy the first 0x100 bytes     */

	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	mr	r8,r26
	mr	r9,r25
	mr	r10,r24

	bl	.prom_init

	li	r24,0			/* cpu # */

/*
 * At this point, r3 contains the physical address we are running at,
 * returned by prom_init()
 */
_STATIC(__after_prom_start)

/*
 * We need to run with __start at physical address 0.
 * This will leave some code in the first 256 bytes of
 * real memory, which is reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors.  The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 *       r26 == relocation offset
 *       r27 == KERNELBASE
 */
	bl	.reloc_offset
	mr	r26,r3
	SET_REG_TO_CONST(r27,KERNELBASE)

	li	r3,0                    /* target addr */

	sub	r4,r27,r26 		/* source addr */
					/* current address of _start   */
			                /*   i.e. where we are running */
			                /*        the source addr      */

	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy      */
	sub	r5,r5,r27

	li	r6,0x100		/* Start offset, the first 0x100  */
					/* bytes were copied earlier.	  */

	bl	.copy_and_flush		/* copy the first n bytes         */
					/* this includes the code being   */
					/* executed here.                 */

	LOADADDR(r0, 4f)                /* Jump to the copy of this code  */
	mtctr	r0			/* that we just made/relocated    */
	bctr

4:	LOADADDR(r5,klimit)
	sub	r5,r5,r26
	ld	r5,0(r5)		/* get the value of klimit */
	sub	r5,r5,r27
	bl	.copy_and_flush		/* copy the rest */
	b	.start_here_pSeries

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_STATIC(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,16                   /* Use the least common      */
					/* denominator cache line    */
			                /* size.  This results in    */
					/* extra cache line flushes  */
					/* but operation is correct. */
					/* Can't get cache line size */
					/* from NACA as it is being  */
					/* moved too.                */

	mtctr	r0			/* put # words/line in ctr */
3:	addi	r6,r6,8			/* copy a cache line */
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr
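
/*
 * Rough C equivalent of copy_and_flush (illustrative; 16 doublewords
 * = 128 bytes, the pessimistic cache line size used above):
 *
 *	while (offset < limit) {
 *		for (i = 0; i < 16; i++, offset += 8)
 *			*(u64 *)(dest + offset) = *(u64 *)(src + offset);
 *		dcbst(dest + offset);	// push the line out to memory
 *		sync();			// order the flush
 *		icbi(dest + offset);	// toss any stale icache copy
 *	}
 *
 * so the relocated image is icache-coherent before we branch into it.
 */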

.align 8
copy_to_here:

/*
 * load_up_fpu(unused, unused, tsk)
 * Disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * On SMP we know the fpu is free, since we give it up every
 * switch (i.e., no lazy save of the FP registers).
 * On entry: r13 == 'current' && last_task_used_math != 'current'
 */
_STATIC(load_up_fpu)
	mfmsr	r5                      /* grab the current MSR */
	ori	r5,r5,MSR_FP
	mtmsrd  r5			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	LOADBASE(r3,last_task_used_math)
	ld	r4,last_task_used_math@l(r3)
	cmpi	0,r4,0
	beq	1f
	/* Save FP state to last_task_used_math's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r4)
	/* Disable FP for last_task_used_math */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r20,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r20
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	ld	r20,THREAD_FPEXC_MODE(r5)
	ori	r23,r23,MSR_FP
	or	r23,r23,r20
	lfd	fr0,THREAD_FPSCR(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	std	r4,last_task_used_math@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
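
/*
 * Sketch of the lazy-FPU convention implemented above (UP only; on
 * SMP the FP state is always given up at context switch).  The helper
 * names here are descriptive only:
 *
 *	if (last_task_used_math) {
 *		save_fprs(&last_task_used_math->thread);  // SAVE_32FPRS + fpscr
 *		last_task_used_math->thread.regs->msr &=
 *			~(MSR_FP | MSR_FE0 | MSR_FE1);    // FP traps to us again
 *	}
 *	restore_fprs(&current->thread);                   // REST_32FPRS + fpscr
 *	regs->msr |= MSR_FP | current->thread.fpexc_mode; // FP on after rfid
 *	last_task_used_math = current;
 */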


/*
 * disable_kernel_fp()
 * Disable the FPU.
 */
_GLOBAL(disable_kernel_fp)
	mfmsr   r3
	rldicl  r0,r3,(63-MSR_FP_LG),1
	rldicl  r3,r0,(MSR_FP_LG+1),0
	mtmsrd  r3			/* disable use of fpu now */
	isync
	blr
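
/*
 * The two rldicl instructions above clear MSR_FP without needing a
 * scratch mask register.  Illustrative C:
 *
 *	msr = rotl64(msr, 63 - MSR_FP_LG) & ~(1UL << 63); // FP bit to MSB, clear
 *	msr = rotl64(msr, MSR_FP_LG + 1);                 // rotate back (64 total)
 *
 * i.e. simply msr &= ~MSR_FP.
 */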
1718
1719
1720/*
1721 * giveup_fpu(tsk)
1722 * Disable FP for the task given as the argument,
1723 * and save the floating-point registers in its thread_struct.
1724 * Enables the FPU for use in the kernel on return.
1725 */
1726_GLOBAL(giveup_fpu)
1727	mfmsr	r5
1728	ori	r5,r5,MSR_FP
1729	mtmsrd	r5			/* enable use of fpu now */
1730	isync
1731	cmpi	0,r3,0
1732	beqlr-				/* if no previous owner, done */
1733	addi	r3,r3,THREAD		/* want THREAD of task */
1734	ld	r5,PT_REGS(r3)
1735	cmpi	0,r5,0
1736	SAVE_32FPRS(0, r3)
1737	mffs	fr0
1738	stfd	fr0,THREAD_FPSCR(r3)
1739#if 0 /* PPPBBB: enable code below if we run with FE0/1 = 0,0 as default */
1740	clrrdi	r4,r13,60		/* r4 <- 0xC000000000000000 */
1741	lfd	fr0,__zero@l(r4)
1742	mtfsf	0xff,fr0
1743#endif
1744	beq	1f
1745	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1746	li	r3,MSR_FP|MSR_FE0|MSR_FE1
1747	andc	r4,r4,r3		/* disable FP for previous task */
1748	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
17491:
1750#ifndef CONFIG_SMP
1751	li	r5,0
1752	LOADBASE(r4,last_task_used_math)
1753	std	r5,last_task_used_math@l(r4)
1754#endif /* CONFIG_SMP */
1755	blr
1756
#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable AltiVec for the task which had AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * On SMP we know the AltiVec unit is free, since we give it up on
 * every switch (i.e., no lazy save of the AltiVec registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h		/* MSR_VEC is above bit 15, so it
					 * takes oris rather than ori */
	mtmsrd	r5			/* enable use of AltiVec now */
	isync
/*
 * For SMP, we don't do lazy AltiVec switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 */
#ifndef CONFIG_SMP
	LOADBASE(r3,last_task_used_altivec)
	ld	r4,last_task_used_altivec@l(r3)
	cmpi	0,r4,0
	beq	1f
	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
	SAVE_32VR(0,r20,r4)
	MFVSCR(vr0)
	li	r20,THREAD_VSCR
	STVX(vr0,r20,r4)
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r20,MSR_VEC@h
	andc	r4,r4,r20	/* disable altivec for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of AltiVec after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD
	oris	r23,r23,MSR_VEC@h
	li	r20,THREAD_VSCR
	LVX(vr0,r20,r5)
	MTVSCR(vr0)
	REST_32VR(0,r20,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	std	r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return

/*
 * giveup_altivec(tsk)
 * Disable AltiVec for the task given as the argument,
 * and save the AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of AltiVec now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32VR(0, r4, r3)
	MFVSCR(vr0)
	li	r4,THREAD_VSCR
	STVX(vr0, r4, r3)
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable AltiVec for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOADBASE(r4,last_task_used_altivec)
	std	r5,last_task_used_altivec@l(r4)
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_ALTIVEC */


#ifdef CONFIG_SMP
/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment here has
 * relocation off.  The paca for this processor has the following
 * fields initialized at this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1    = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL(__secondary_start)

	HMT_MEDIUM			/* Set thread priority to MEDIUM */

	/* set up the TOC (virtual address) */
	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
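
	/* The TOC pointer sits 0x8000 past __toc_start (standard ppc64
	 * ELF ABI), but addi takes a signed 16-bit immediate whose
	 * maximum is 0x7fff, so the offset is applied as two 0x4000
	 * additions.  The same idiom recurs at the other TOC set-ups
	 * below. */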

	std	r2,PACATOC(r13)
	li	r6,0
	std	r6,PACAKSAVE(r13)
	stb	r6,PACAPROCENABLED(r13)

#ifndef CONFIG_PPC_ISERIES
	/* Initialize the page table pointer register. */
	LOADADDR(r6,_SDR1)
	ld	r6,0(r6)		/* get the value of _SDR1 */
	mtspr	SDR1,r6			/* set the htab location  */
#endif
	/* Initialize the first segment table (or SLB) entry */
	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table */
	bl	.stab_initialize

	/* Initialize the kernel stack.  Just a repeat for iSeries. */
	LOADADDR(r3,current_set)
	sldi	r28,r24,4		/* get current_set[cpu#] */
	ldx	r28,r3,r28
	std	r28,PACACURRENT(r13)
	addi	r1,r28,TASK_UNION_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table */
	ori	r4,r3,1			/* turn on valid bit */

#ifdef CONFIG_PPC_ISERIES
	li	r0,-1			/* hypervisor call */
	li	r3,1
	sldi	r3,r3,63		/* 0x8000000000000000 */
	ori	r3,r3,4			/* 0x8000000000000004 */
	sc				/* HvCall_setASR */
#else
	/* set the ASR */
	li	r3,0x5000		/* r3 = ptr to systemcfg */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
	cmpldi	r3,PLATFORM_PSERIES_LPAR
	bne	98f
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar  */
	beq	97f
	cmpwi	r3,0x36			/* IStar  */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HSC				/* invoke the hcall */
	b	99f
98:					/* !(rpa hypervisor) || !(sstar) */
	mtasr	r4			/* set the stab location */
99:
#endif
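
/*
 * Decision sketch for the ASR set-up above, in illustrative C (the
 * helper names is_sstar_class() and hcall() are stand-ins, not real
 * kernel interfaces; stab_real is the PACASTABREAL value with the
 * valid bit or'd in, as computed above):
 *
 *	if (platform == PLATFORM_PSERIES_LPAR &&
 *	    is_sstar_class(pvr >> 16))		// 0x37, 0x36 or 0x34
 *		hcall(H_SET_ASR, stab_real);	// hypervisor owns the ASR
 *	else
 *		mtasr(stab_real);		// we can write it ourselves
 */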
	li	r7,0
	mtlr	r7			/* zero the link register */

	/* enable MMU and jump to start_secondary */
	LOADADDR(r3,.start_secondary_prolog)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
	ori	r4,r4,MSR_EE
#endif
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
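
/*
 * Note on the idiom above: rfid loads the PC from SRR0 and the MSR
 * from SRR1 in one operation, so writing the target address and the
 * relocation-on MSR_KERNEL value into the SRRs and executing rfid
 * turns the MMU on and branches atomically.  Every real-mode to
 * virtual-mode transition in this file uses this pattern.
 */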

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer */
	bl	.start_secondary
#endif /* CONFIG_SMP */

/*
 * This subroutine clobbers r11, r12 and the LR
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)	/* r12 = MSR_SF */
	or	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)	/* r12 = MSR_ISF */
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr
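
/*
 * Building each mask with li 1 + rldicr is the standard trick for
 * materializing a single high MSR bit without a 64-bit immediate
 * load: e.g. with MSR_SF_LG = 63, rotating the 1 left by 63 and
 * truncating the mask at bit 63-63 keeps exactly the top bit,
 * i.e. 0x8000000000000000.
 */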

/*
 * This subroutine clobbers r11, r12 and the LR
 */
_GLOBAL(enable_32b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)	/* r12 = MSR_SF */
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)	/* r12 = MSR_ISF */
	andc	r11,r11,r12
	mtmsrd	r11
	isync
	blr

/*
 * This is where the main kernel code starts.
 */
_STATIC(start_here_pSeries)
	/* get a new offset, now that the kernel has moved. */
	bl	.reloc_offset
	mr	r26,r3

	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

	/* set up the systemcfg pointer which is needed by *tab_initialize */
	LOADADDR(r6,systemcfg)
	sub	r6,r6,r26		/* addr of the variable systemcfg */
	li	r27,0x5000
	std	r27,0(r6)		/* set the value of systemcfg */

	/* set up the naca pointer which is needed by *tab_initialize */
	LOADADDR(r6,naca)
	sub	r6,r6,r26		/* addr of the variable naca */
	li	r27,0x4000
	std	r27,0(r6)		/* set the value of naca */
#ifdef CONFIG_HMT
	/* Start up the second thread on cpu 0 */
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r3,0x36			/* Icestar */
	beq	90f
	cmpwi	r3,0x37			/* SStar   */
	beq	90f
	b	91f			/* HMT not supported */
90:	li	r3,0
	bl	.hmt_start_secondary
91:
#endif

#ifdef CONFIG_SMP
	/* All secondary cpus are now spinning on a common
	 * spinloop, release them all now so they can start
	 * to spin on their individual paca spinloops.
	 * For non-SMP kernels, the secondary cpus never
	 * get out of the common spinloop.
	 */
	li	r3,1
	LOADADDR(r5,__secondary_hold_spinloop)
	tophys(r4,r5)
	std	r3,0(r4)
#endif
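
/*
 * Release protocol, sketched in C (illustrative only; the hold loop
 * itself lives in the early secondary-spin code near the start of
 * this file):
 *
 *	master:					secondary:
 *	*__secondary_hold_spinloop = 1;		while (*__secondary_hold_spinloop == 0)
 *							;	// spin in real mode
 *
 * The store goes through the physical alias (tophys) because the
 * secondaries are still running with relocation off.
 */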

	/* The following gets the stack and TOC set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	/* real ptr to current */
	LOADADDR(r3,init_task_union)
	sub	r3,r3,r26

	/* set up a stack pointer (physical address) */
	addi	r1,r3,TASK_UNION_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)	/* push an initial frame with
						 * a zero back-chain */

	LOADADDR(r3,cpu_specs)
	sub	r3,r3,r26
	LOADADDR(r4,cur_cpu_spec)
	sub	r4,r4,r26
	mr	r5,r26
	bl	.identify_cpu

	/* set up the TOC (physical address) */
	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
	sub	r2,r2,r26

	/* Get the pointer to the segment table which is used by */
	/* stab_initialize                                       */
	li	r27,0x4000
	ld	r6,PACA(r27)		/* Get the base paca pointer */
	sub	r13,r6,r26		/* convert to physical addr */
	mtspr	SPRG3,r13		/* PPPBBB: Temp... -Peter */
	ld	r3,PACASTABREAL(r13)
	ori	r4,r3,1			/* turn on valid bit */

	/* set the ASR */
	li	r3,0x5000		/* r3 = ptr to systemcfg */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
	cmpldi	r3,PLATFORM_PSERIES_LPAR
	bne	98f
	mfspr	r3,PVR
	srwi	r3,r3,16
	cmpwi	r3,0x37			/* SStar  */
	beq	97f
	cmpwi	r3,0x36			/* IStar  */
	beq	97f
	cmpwi	r3,0x34			/* Pulsar */
	bne	98f
97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
	HSC				/* invoke the hcall */
	b	99f
98:					/* This is not a hypervisor machine */
	mtasr	r4			/* set the stab location */
99:
	mfspr	r6,SPRG3
	ld	r3,PACASTABREAL(r6)	/* restore r3 for stab_initialize */

	/* Initialize an initial memory mapping and turn on relocation. */
	bl	.stab_initialize
	bl	.htab_initialize

	li	r3,0x5000		/* r3 = ptr to systemcfg */
	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
	cmpldi	r3,PLATFORM_PSERIES
	bne	98f
	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR */
	sub	r6,r6,r26
	ld	r6,0(r6)		/* get the value of _SDR1 */
	mtspr	SDR1,r6			/* set the htab location  */
98:
	LOADADDR(r3,.start_here_common)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid

	/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* Clear out the BSS */
	LOADADDR(r11,_end)

	LOADADDR(r8,__bss_start)

	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to a doubleword	*/
	rldicl.	r11,r11,61,3		/* shift right by 3 to get the
					 * doubleword count		*/
	beq	4f
	addi	r8,r8,-8		/* back up one slot; the stdu
					 * below pre-increments		*/
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:
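
/*
 * Worked example of the count computation above: for a BSS of, say,
 * 0x1234 bytes, (0x1234 + 7) >> 3 = 0x247, so the loop stores 0x247
 * zero doublewords.  The last store can extend up to 7 bytes past
 * _end only when _end is not doubleword-aligned; linker scripts
 * normally align it, so in practice the sweep is exact.
 */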

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */

	/* ptr to current */
	LOADADDR(r3,init_task_union)

	/* set up the stack */
	addi	r1,r3,TASK_UNION_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* set up the TOC */
	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	li	r3,0
	bl	.do_cpu_ftr_fixups

	/* set up the systemcfg pointer */
	LOADADDR(r9,systemcfg)
	SET_REG_TO_CONST(r8, KERNELBASE+0x5000)
	std	r8,0(r9)

	/* set up the naca pointer */
	LOADADDR(r9,naca)
	SET_REG_TO_CONST(r8, KERNELBASE+0x4000)
	std	r8,0(r9)		/* set the value of the naca ptr */

	LOADADDR(r4,naca)		/* Get naca ptr address */
	ld	r4,0(r4)		/* Get the location of the naca */
	ld	r13,PACA(r4)		/* Get the base paca pointer */
	mtspr	SPRG3,r13

	/* ptr to current */
	LOADADDR(r4,init_task_union)
	std	r4,PACACURRENT(r13)

	std	r2,PACATOC(r13)
	li	r5,0
	std	r5,PACAKSAVE(r13)	/* store the freshly zeroed r5; r0
					 * may have been clobbered by the
					 * .do_cpu_ftr_fixups call above */

	/* ptr to hardware interrupt stack for processor 0 */
	LOADADDR(r3, hardware_int_paca0)
	li	r5,0x1000
	sldi	r5,r5,3			/* r5 = 0x8000 = 8 * 4096, the
					 * size of the area below */
	subi	r5,r5,STACK_FRAME_OVERHEAD

	add	r3,r3,r5		/* point just below the top of the
					 * area, leaving room for a frame */
	std	r3,PACAHRDWINTSTACK(r13)

	li	r3,0
	stb	r3,PACAHRDWINTCOUNT(r13)
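
/*
 * The 8 * 4096 bytes used above must stay in sync with the
 * hardware_int_paca0 reservation in the .data section at the end of
 * this file (.space 8*4096); the stack grows down from its top, so
 * the initial pointer is top-of-area minus STACK_FRAME_OVERHEAD.
 */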

	/* Restore the parms passed in from the bootloader. */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	bl	.setup_system

	/* Load up the kernel context */
5:
#ifdef DO_SOFT_DISABLE
	mfspr	r4,SPRG3
	li	r5,0
	stb	r5,PACAPROCENABLED(r4)	/* Soft Disabled */
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled */
	mtmsrd	r5
#endif

	bl	.start_kernel

_GLOBAL(__setup_cpu_power3)
	blr
_GLOBAL(__setup_cpu_power4)
	blr

_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
	LOADADDR(r5, hmt_thread_data)
	mfspr	r7,PVR
	srwi	r7,r7,16
	cmpwi	r7,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r7,0x36			/* Icestar */
	beq	91f
	cmpwi	r7,0x37			/* SStar   */
	beq	91f
	b	101f
90:	mfspr	r6,PIR
	andi.	r6,r6,0x1f
	b	92f
91:	mfspr	r6,PIR
	andi.	r6,r6,0x3ff
92:	sldi	r4,r24,3
	stwx	r6,r5,r4
	bl	.hmt_start_secondary
	b	101f

__hmt_secondary_hold:
	LOADADDR(r5, hmt_thread_data)
	clrldi	r5,r5,4			/* convert to a real address */
	li	r7,0
	mfspr	r6,PIR
	mfspr	r8,PVR
	srwi	r8,r8,16
	cmpwi	r8,0x34
	bne	93f
	andi.	r6,r6,0x1f		/* Pulsar: 5-bit thread id */
	b	103f
93:	andi.	r6,r6,0x3ff		/* IStar/SStar: 10-bit id, matching
					 * the mask hmt_init stores with */

103:	lwzx	r8,r5,r7
	cmpw	r8,r6
	beq	104f
	addi	r7,r7,8
	b	103b

104:	addi	r7,r7,4
	lwzx	r9,r5,r7
	mr	r24,r9
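
/*
 * The scan above, sketched in C (illustrative; assumes hmt_thread_data
 * is laid out as pairs of 32-bit words { phys_thread_id, linux_cpu }):
 *
 *	for (i = 0; ; i++)
 *		if (hmt_thread_data[i].phys_thread_id == (pir & mask)) {
 *			r24 = hmt_thread_data[i].linux_cpu;
 *			break;
 *		}
 *
 * Note there is no terminating sentinel: hmt_init is expected to have
 * stored a matching entry before this code runs.
 */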
101:
#endif
	mr	r3,r24
	b	.pseries_secondary_smp_init

#ifdef CONFIG_HMT
_GLOBAL(hmt_start_secondary)
	LOADADDR(r4,__hmt_secondary_hold)
	clrldi	r4,r4,4
	mtspr	NIADORM, r4
	mfspr	r4, MSRDORM
	li	r5, -65
	and	r4, r4, r5
	mtspr	MSRDORM, r4
	lis	r4,0xffef
	ori	r4,r4,0x7403
	mtspr	TSC, r4
	li	r4,0x1f4
	mtspr	TST, r4
	mfspr	r4, HID0
	ori	r4, r4, 0x1
	mtspr	HID0, r4
	mfspr	r4, CTRLF
	oris	r4, r4, 0x40
	mtspr	CTRLT, r4
	blr
#endif

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

	.globl	ioremap_dir
ioremap_dir:
	.space	4096

	.globl	bolted_dir
bolted_dir:
	.space	4096

	.globl	hardware_int_paca0
hardware_int_paca0:
	.space	8*4096

/* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */
	.globl	stab_array
stab_array:
	.space	4096 * (48 - 1)