1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15#include <asm/exception-64s.h>
16#include <asm/ptrace.h>
17
18/*
19 * We layout physical memory as follows:
20 * 0x0000 - 0x00ff : Secondary processor spin code
21 * 0x0100 - 0x2fff : pSeries Interrupt prologs
22 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
23 * 0x6000 - 0x6fff : Initial (CPU0) segment table
24 * 0x7000 - 0x7fff : FWNMI data area
25 * 0x8000 -        : Early init and support code
26 */
27
28/*
29 * This is the start of the interrupt handlers for pSeries
30 * This code runs with relocation off.
31 * Code from here to __end_interrupts gets copied down to real
32 * address 0x100 when we are running a relocatable kernel.
33 * Therefore any relative branches in this section must only
34 * branch to labels in this section.
35 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	/* 0x100: System Reset.  Standard pSeries prolog into
	 * system_reset_common. */
	STD_EXCEPTION_PSERIES(0x100, system_reset)

	/*
	 * 0x200: Machine Check.  Open-coded (not STD_EXCEPTION_PSERIES)
	 * so it can use the dedicated PACA_EXMC save area rather than
	 * PACA_EXGEN -- a machine check may interrupt another exception's
	 * prolog while EXGEN is live.
	 */
	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	DO_KVM	0x200
	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
48
	/*
	 * 0x300: Data Storage Interrupt (DSI).
	 * On CPUs without an SLB (ALT_FTR..._IFCLR(CPU_FTR_SLB) below, i.e.
	 * segment-table machines) we must first distinguish a miss on the
	 * bolted segment-table mapping from an ordinary fault, because
	 * do_stab_bolted must run without touching EXGEN.
	 */
	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	DO_KVM	0x300
	mtspr	SPRN_SPRG_SCRATCH0,r13
BEGIN_FTR_SECTION
	mfspr	r13,SPRN_SPRG_PACA
	std	r9,PACA_EXSLB+EX_R9(r13)	/* stage r9/r10 in EXSLB for now */
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60			/* r10 = top nibble of DAR */
	rlwimi	r10,r9,16,0x20			/* merge in a DSISR bit */
	mfcr	r9
	cmpwi	r10,0x2c			/* kernel-region stab miss? */
	beq	do_stab_bolted_pSeries
	/* Not a bolted stab miss: migrate the saved regs from EXSLB to
	 * EXGEN and fall into the normal DSI path. */
	ld	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r12,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(data_access_common)
FTR_SECTION_ELSE
	/* SLB machines: plain prolog straight to data_access_common */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)
78
	/*
	 * 0x380: Data Segment Interrupt (SLB miss on a data access).
	 * Saves r3 and r9-r13 in paca->exslb, puts the faulting address
	 * (DAR) in r3 and SRR1 in r12, then enters slb_miss_realmode.
	 */
	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	DO_KVM	0x380
	mtspr	SPRN_SPRG_SCRATCH0,r13
	mfspr	r13,SPRN_SPRG_PACA		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXSLB+EX_R13(r13)	/* original r13 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11			/* CTR is live: handler restores it */
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif
115
	/* 0x400: Instruction Storage Interrupt (ISI) */
	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	/*
	 * 0x480: Instruction Segment Interrupt (SLB miss on fetch).
	 * Mirrors data_access_slb_pSeries above, except the faulting
	 * address comes from SRR0 rather than DAR.
	 */
	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	DO_KVM	0x480
	mtspr	SPRN_SPRG_SCRATCH0,r13
	mfspr	r13,SPRN_SPRG_PACA		/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXSLB+EX_R13(r13)	/* original r13 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/* Relocatable kernel: indirect branch, see 0x380 comment above */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif
149
	/* Fixed vectors 0x500-0xb00.  MASKABLE_* vectors honour the
	 * soft-disable state (see masked_interrupt below); STD_* do not. */
	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
157
	/*
	 * 0xc00: System Call.  Hand-rolled minimal prolog: no register
	 * save area is needed because the syscall path preserves what it
	 * must itself.  Builds SRR0 = system_call_entry (kernel virtual)
	 * and SRR1 = kernel MSR, then rfid's into the kernel.
	 */
	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	DO_KVM	0xc00
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe		/* magic "switch endian" syscall nr */
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	mr	r9,r13			/* keep caller's r13 in r9 */
	mfspr	r13,SPRN_SPRG_PACA
	mfspr	r11,SPRN_SRR0		/* r11 = return address */
	ld	r12,PACAKBASE(r13)
	ld	r10,PACAKMSR(r13)
	LOAD_HANDLER(r12, system_call_entry)
	mtspr	SPRN_SRR0,r12
	mfspr	r12,SPRN_SRR1		/* r12 = caller's MSR */
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE		/* flip endianness and return */
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.
185
	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
	/* Each 0xfXX vector is just a branch to the real prolog, which
	 * was moved down past 0x3000 (see "moved from 0xf00" below). */
performance_monitor_pSeries_1:
	. = 0xf00
	DO_KVM	0xf00
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	DO_KVM	0xf20
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	DO_KVM	0xf40
	b	vsx_unavailable_pSeries
208
/* Cell (CBE) RAS vectors use the hypervisor SRRs, hence HSTD_. */
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
#endif /* CONFIG_CBE_RAS */

	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00 */
	/* Full prologs for the 0xfXX vectors; "." places them at the
	 * current (non-fixed) location, reached via the branches above. */
	STD_EXCEPTION_PSERIES(., performance_monitor)
	STD_EXCEPTION_PSERIES(., altivec_unavailable)
	STD_EXCEPTION_PSERIES(., vsx_unavailable)
229
/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
masked_interrupt:
	/* r10/r9 were set up by the maskable prolog; r10 is stored as the
	 * new hard_enabled value (presumably 0 -- set by caller, TODO
	 * confirm against the MASKABLE_* macro). */
	stb	r10,PACAHARDIRQEN(r13)
	mtcrf	0x80,r9			/* restore CR0 */
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_SRR1
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16		/* rotate back into place */
	mtspr	SPRN_SRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	mfspr	r13,SPRN_SPRG_SCRATCH0
	rfid				/* resume with EE off */
	b	.
246
	.align	7
	/* Tail of the 0x300 stab-bolted path: finish saving r11-r13 into
	 * paca->exslb (r9/r10 were saved at 0x300) and enter .do_stab_bolted. */
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXSLB+EX_R13(r13)	/* original r13 */
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted)
254
#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 * Firmware delivers system reset / machine check NMIs here instead of
 * to the architected 0x100/0x200 vectors when FWNMI is registered.
 */
	.globl system_reset_fwnmi
      .align 7
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
      .align 7
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

#endif /* CONFIG_PPC_PSERIES */
274
#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 *
 * NOTE(review): this block is never assembled.  It uses SPRG_SCRATCH0
 * and SRR0/SRR1 without the SPRN_ prefix used everywhere else in this
 * file -- confirm/fix those names before ever re-enabling it.
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r10,SPRG_SCRATCH0
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32		/* r12 = kernel base from paca addr */
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI	/* go virtual, recoverable */
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */
302
/* KVM's trampoline code needs to be close to the interrupt handlers */

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

	.align	7
	.globl	__end_interrupts
__end_interrupts:
	/* End of the block copied down to real address 0x100 for
	 * relocatable kernels (see comment at __start_interrupts). */
312
/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = pt_regs */
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */
357
	.align	7
	/* In-range (first 32k) trampoline targeted by the 0xc00 prolog. */
system_call_entry:
	b	system_call_common
361
/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)	/* per-cpu emergency stack */
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)		/* record the bad SP */
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)	/* trap number stashed by prolog */
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)		/* back-chain to dummy frame */
	li	r12,0
	std	r12,0(r11)		/* terminate the back-chain */
	ld	r2,PACATOC(r13)		/* restore kernel TOC for the call */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b			/* kernel_bad_stack never returns */
403
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	/* Capture DAR/DSISR before the common prolog can clobber them */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)	/* r3 = faulting address */
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)	/* r4 = fault status */
	li	r5,0x300			/* r5 = trap number */
	b	.do_hash_page	 	/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)		/* faulting address = interrupted PC */
	andis.	r4,r12,0x5820		/* ISI status bits from SRR1 -> r4
					 * (DSISR-equivalent; mask value per
					 * arch -- TODO confirm bit names) */
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */
430
/*
 * Here is the common SLB miss user that is used when going to virtual
 * mode for SLB misses, that is currently not used
 * (entire block compiled out via __DISABLED__).
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)	/* stash faulting address */
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault		/* cc presumably set by
					 * slb_allocate_user -- confirm */

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	/* NOTE(review): SRR0/SRR1 lack the SPRN_ prefix used elsewhere;
	 * dead code under __DISABLED__ -- fix before re-enabling. */
	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */
493
494
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contain the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	/* r11 holds the interrupted CTR (saved by the 0x380/0x480
	 * trampolines); put it back before it can be clobbered. */
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* iSeries: hypervisor keeps SRR0/1 in the lppaca, not the SPRs */
	ld	r11,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

/* MSR_RI was clear: unrecoverable.  Get to unrecov_slb in virtual mode. */
2:
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	b	unrecov_slb
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b			/* never returns */
573
	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	/* 0x500 external interrupt: build the frame, wake from nap if
	 * needed, then dispatch to the generic IRQ code. */
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
	FINISH_NAP
hardware_interrupt_entry:
	DISABLE_INTS
BEGIN_FTR_SECTION
	bl	.ppc64_runlatch_on	/* CPUs with CTRL: set the runlatch */
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	/* Taken an interrupt while napping: clear the nap flag and make
	 * the interrupt return act like a blr in the idle loop. */
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif
597
	.align	7
	.globl alignment_common
alignment_common:
	/* 0x600: capture DAR/DSISR before the common prolog, then hand
	 * off to the C alignment fixup. */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	/* 0x700: program check -> C handler */
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except
625
	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	/* cc set by the prolog: eq => fault came from the kernel */
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE			/* FP in kernel is a bug: trap */
1:	bl	.load_up_fpu
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f			/* from kernel: raise exception */
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except
656
	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	/* From user (ne): tail-jump to load_up_vsx, which returns for us */
	bne	.load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl	__end_handlers
__end_handlers:
	/* End of the region that must sit in the first 32k (LOAD_HANDLER) */
676
/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:			/* restores irq state too */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ(r3);
	ld	r12,_MSR(r1)
	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
	b	1f

	.globl	fast_exception_return
fast_exception_return:
	ld	r12,_MSR(r1)
1:	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	andi.	r3,r12,MSR_PR		/* returning to userspace? */
	beq	2f
	ACCOUNT_CPU_USER_EXIT(r3, r4)
2:
#endif

	/* Restore CR/LR/CTR/XER and the volatile GPRs from the frame */
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	/* Drop EE and RI so the final SRR0/1 setup cannot be interrupted */
	mfmsr	r10
	rldicl	r10,r10,48,1		/* clear EE */
	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SPRN_SRR1,r12
	mtspr	SPRN_SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)		/* restore the interrupted SP last */
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b			/* never returns */
735
736
/*
 * Hash table stuff
 *
 * do_hash_page: entered from the 0x300/0x400 common handlers with
 *   r3 = faulting address, r4 = access/fault bits, r5 = trap number.
 * Tries to resolve the fault by inserting a hashed page table entry;
 * falls back to the full C page-fault path otherwise.
 */
	.align	7
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.  r0,r4,DSISR_DABRMATCH@h
	bne-    handle_dabr_fault

BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	clrrdi	r11,r1,THREAD_SHIFT	/* r11 = thread_info */
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
	 * and will clobber volatile registers when irq tracing is enabled
	 * so we need to reload them. It may be possible to be smarter here
	 * and move the irq tracing elsewhere but let's keep it simple for
	 * now
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r3,_DAR(r1)
	ld	r4,_DSISR(r1)
	ld	r5,_TRAP(r1)
	ld	r12,_MSR(r1)
	clrrdi	r5,r5,4
#endif /* CONFIG_TRACE_IRQFLAGS */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

BEGIN_FW_FTR_SECTION
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	13f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)

BEGIN_FW_FTR_SECTION
	/*
	 * Here we have interrupts hard-disabled, so it is sufficient
	 * to restore paca->{soft,hard}_enable and get out.
	 */
	beq	fast_exc_return_irq	/* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f			/* r3 < 0: HV refused the insert */

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .arch_local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
	bl	.arch_local_irq_restore
	b	11f			/* into handle_page_fault */
835
/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld      r4,_DAR(r1)
	ld      r5,_DSISR(r1)
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      .do_dabr
	b       .ret_from_except_lite

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0			/* 0 => fault handled */
	beq+	13f
	bl	.save_nvgprs		/* nonzero => signal/oops path */
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

13:	b	.ret_from_except_lite

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	mr	r5,r3			/* r5 = hash_page return code */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	.save_nvgprs
	mr	r4,r3			/* r4 = faulting address */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	.bad_page_fault
	b	.ret_from_except

	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault	/* couldn't: full fault path */
	b	fast_exception_return
893
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 *
 * Inserts a bolted kernel segment-table entry for the faulting ESID,
 * evicting a pseudo-random entry from the primary group if it is full,
 * then returns straight from the exception.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28			/* r11 = ESID */
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
	andi.	r11,r11,0x80
	beq	2f		/* free slot found */
	addi	r10,r10,16	/* next 16-byte ste */
	andi.	r11,r10,0x70	/* wrapped past the 8-entry group? */
	bne	1b

	/* Stick for only searching the primary group for now.		*/
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			*/
	subi	r10,r10,128
	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/

	isync			/* mark the entry invalid		*/
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
	slbie	r11		/* flush any cached translation */

2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
	eieio			/* order vsid store before valid set */

	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
	ori	r11,r11,0x90	/* Turn on valid and kp			*/
	std	r11,0(r10)	/* Put new entry back into the stab	*/

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI around SRR0/1 setup */
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
979
#ifdef CONFIG_PPC_PSERIES
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	.= 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:
#endif /* CONFIG_PPC_PSERIES */

	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	/* Fixed-layout map handed to the iSeries hypervisor; field order
	 * and sizes must match the HvCall LparMap definition. */
        . = LPARMAP_PHYS
	.globl xLparMap
xLparMap:
	.quad	HvEsidsToMap		/* xNumberEsids */
	.quad	HvRangesToMap		/* xNumberRanges */
	.quad	STAB0_PAGE		/* xSegmentTableOffs */
	.zero	40			/* xRsvd */
	/* xEsids (HvEsidsToMap entries of 2 quads) */
	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
	.quad	VMALLOC_START_ESID	/* xKernelEsid */
	.quad	VMALLOC_START_VSID	/* xKernelVsid */
	/* xRanges (HvRangesToMap entries of 3 quads) */
	.quad	HvPagesToMap		/* xPages */
	.quad	0			/* xOffset */
	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */

#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_PSERIES
        . = 0x8000
#endif /* CONFIG_PPC_PSERIES */

/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap above), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_OFFSET	/* 0x8000 */
	.globl initial_stab
initial_stab:
	.space	4096
1030