1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 *    S390 low-level entry points.
4 *
5 *    Copyright IBM Corp. 1999, 2012
6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 *		 Hartmut Penner (hp@de.ibm.com),
8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 */
10
11#include <linux/init.h>
12#include <linux/linkage.h>
13#include <asm/asm-extable.h>
14#include <asm/alternative-asm.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17#include <asm/dwarf.h>
18#include <asm/errno.h>
19#include <asm/ptrace.h>
20#include <asm/thread_info.h>
21#include <asm/asm-offsets.h>
22#include <asm/unistd.h>
23#include <asm/page.h>
24#include <asm/sigp.h>
25#include <asm/irq.h>
26#include <asm/vx-insn.h>
27#include <asm/setup.h>
28#include <asm/nmi.h>
29#include <asm/export.h>
30#include <asm/nospec-insn.h>
31
# Kernel stack geometry, derived from the page size and the thread
# stack order configured for this architecture.
STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
# Offset of the initial (topmost usable) stack frame: leave room for
# one pt_regs area plus the standard frame overhead at the stack top.
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

# Offset of the linkage-stack program parameter in lowcore, used with
# the lpp instruction (see ALTERNATIVE sites guarded by facility 40).
_LPP_OFFSET	= __LC_LPP
37
	# Store the breaking-event address register into \address.
	# Emitted as a raw opcode; patched in only when facility 193 is
	# installed, otherwise left as a nop.
	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
	.endm
41
	# Load the breaking-event address register from \address.
	# Raw opcode form; patched in only when facility 193 is installed,
	# otherwise left as a nop.
	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
	.endm
45
	# Load the return PSW from \address: uses the long-displacement
	# form (raw siy opcode) when facility 193 is installed, otherwise
	# branches to \lpswe which executes a plain lpswe.
	.macro LPSWEY address,lpswe
	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
	.endm
49
	# Copy the last breaking-event address from lowcore into the
	# pt_regs addressed by \reg; a 6-byte nop (brcl 0,0) when
	# facility 193 is not installed.
	.macro MBEAR reg
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
	.endm
53
	# Branch to stack_overflow if %r15 points into the configured
	# stack guard area. %r14 is preloaded with \savearea so the
	# overflow handler can recover the saved registers from it.
	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm
61
	# Verify that %r15 points into one of the known per-CPU kernel
	# stacks (kernel, async, machine check, nodat or restart stack)
	# and branch to \oklabel if so. Otherwise load \savearea into
	# %r14 and jump to stack_overflow. Without CONFIG_VMAP_STACK the
	# check degenerates to an unconditional branch to \oklabel.
	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE	# round down to stack base
	oill	%r14,STACK_INIT			# add offset of initial frame
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm
83
	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant.  The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.  It recurses byte by byte: a mask wider than one
	 * byte is shifted right until a single byte remains, while
	 * \bytepos tracks how many bytes were shifted away.  Masks that
	 * straddle a byte boundary are rejected at assembly time.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask	# test the single remaining mask byte
	.endm
105
	# Disable branch prediction (raw rrf opcode, function code 12);
	# patched in only when facility 82 is installed, otherwise a nop.
	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
	.endm
109
	# Enable branch prediction (raw rrf opcode, function code 13);
	# patched in only when facility 82 is installed, otherwise a nop.
	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
	.endm
113
	# On kernel entry: re-enable branch prediction (function code 13)
	# unless one of the \tif_mask isolation bits is set in \tif_ptr.
	# Patched to a jump over the sequence when facility 82 is absent.
	# NOTE: the .+8/.+12 offsets depend on the exact instruction sizes.
	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", 82
	.endm
118
	# On kernel exit: depending on the \tif_mask isolation bits either
	# disable (function code 12) or enable (function code 13) branch
	# prediction. Which variant runs is selected by facility 82
	# alternatives; the TSTMSK sets the condition code for both.
	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
	.endm
124
	/*
	 * The CHKSTG macro jumps to the provided label in case the
	 * machine check interruption code reports one of unrecoverable
	 * storage errors:
	 * - Storage error uncorrected
	 * - Storage key error uncorrected
	 * - Storage degradation with Failing-storage-address validity
	 */
	.macro CHKSTG errlabel
	TSTMSK	__LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
	jnz	\errlabel
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
	jz	.Loklabel\@			# no degradation -> ok
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
	jnz	\errlabel			# degradation + valid failing address
.Loklabel\@:
	.endm
142
143#if IS_ENABLED(CONFIG_KVM)
	/*
	 * The OUTSIDE macro jumps to the provided label in case the value
	 * in the provided register is outside of the provided range. The
	 * macro is useful for checking whether a PSW stored in a register
	 * pair points inside or outside of a block of instructions.
	 * @reg: register to check
	 * @start: start of the range
	 * @end: end of the range
	 * @outside_label: jump here if @reg is outside of [@start..@end)
	 * Clobbers %r13 and %r14.
	 */
	.macro OUTSIDE reg,start,end,outside_label
	lgr	%r14,\reg
	larl	%r13,\start
	slgr	%r14,%r13		# %r14 = \reg - \start
#ifdef CONFIG_AS_IS_LLVM
	# The LLVM integrated assembler needs the range size as a literal
	# pool entry; compare against it PC-relative.
	clgfrl	%r14,.Lrange_size\@
#else
	clgfi	%r14,\end - \start
#endif
	jhe	\outside_label		# unsigned >= size -> outside range
#ifdef CONFIG_AS_IS_LLVM
	.section .rodata, "a"
	.align 4
.Lrange_size\@:
	.long	\end - \start
	.previous
#endif
	.endm
172
	# Leave SIE: clear the "in SIE" bit in the SIE control block,
	# restore the kernel address space and point %r9 at sie_exit so
	# the interrupted PSW resumes past the SIE critical section.
	.macro SIEEXIT
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
179#endif
180
	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only in order to avoid that __bpon starts at
	 * the beginning of the kprobes text section. In that case we would
	 * have several symbols at the same address. E.g. objdump would take
	 * an arbitrary symbol name when disassembling this code.
	 * With the added nop in between the __bpon symbol is unique
	 * again.
	 */
	nop	0

# __bpon - re-enable branch prediction on the current CPU (see BPON).
ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)
200
201/*
202 * Scheduler resume function, called by switch_to
203 *  gpr2 = (task_struct *) prev
204 *  gpr3 = (task_struct *) next
205 * Returns:
206 *  gpr2 = prev
207 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack		# offset of stack pointer in task
	lghi	%r1,__TASK_thread		# offset of thread struct in task
	llill	%r5,STACK_INIT			# offset of initial stack frame
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40	# set program parameter
	BR_EX	%r14
ENDPROC(__switch_to)
225
226#if IS_ENABLED(CONFIG_KVM)
227/*
228 * sie64a calling convention:
229 * %r2 pointer to sie control block
230 * %r3 guest register save area
231 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap			# no gmap -> keep current asce
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT			# fault accessing guest state
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	# Any fault on the rewind pads or at sie_exit is translated into
	# an -EFAULT exit reason via .Lsie_fault.
	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
297EXPORT_SYMBOL(sie64a)
298EXPORT_SYMBOL(sie_exit)
299#endif
300
301/*
302 * SVC interrupt handler routine. System calls are synchronous events and
303 * are entered with interrupts disabled.
304 */
305
ENTRY(system_call)
	stpt	__LC_SYS_ENTER_TIMER		# capture entry CPU timer
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC	# save user %r8-%r15
	BPOFF
	lghi	%r14,0				# 0 = regular svc (1 via .Lpgm_svcper)
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# switch to kernel address space
	lg	%r12,__LC_CURRENT
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)	# terminate backchain
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	MBEAR	%r2
	lgr	%r3,%r14			# second arg: single-step flag
	brasl	%r14,__do_syscall
	lctlg	%c1,%c1,__LC_USER_ASCE		# restore user address space
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(system_call)
343
344#
345# a new process exits the kernel with ret_from_fork
346#
ENTRY(ret_from_fork)
	lgr	%r3,%r11		# second arg for __ret_from_fork
					# (presumably pt_regs pointer set up by
					#  copy_thread — confirm against caller)
	brasl	%r14,__ret_from_fork
	lctlg	%c1,%c1,__LC_USER_ASCE	# restore user address space
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(ret_from_fork)
358
359/*
360 * Program check handler routine
361 */
362
ENTRY(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r12,__LC_CURRENT
	lghi	%r10,0			# pt_regs flags: no guest fault (yet)
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
	SIEEXIT
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 -> pt_regs
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)	# terminate backchain
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
	stmg	%r8,%r9,__PT_PSW(%r11)

	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call: rewrite the return PSW to resume at
# .Lsysc_per with %r14 = 1, so the svc is handled with the PER flag set
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1			# single-step flag for __do_syscall
	LBEAR	__LC_PGM_LAST_BREAK
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
ENDPROC(pgm_check_handler)
431
432/*
433 * Interrupt handler macro used for external and IO interrupts.
434 */
.macro INT_HANDLER name,lc_old_psw,handler
# Generates handler \name: saves state, leaves SIE if necessary,
# switches to the proper stack and calls the C function \handler with
# a pt_regs pointer. \lc_old_psw is the lowcore old-PSW location.
ENTRY(\name)
	stckf	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	STBEAR	__LC_LAST_BREAK
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)	# frame on current stack
	j	2f
1:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)	# terminate backchain
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	MBEAR	%r11
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(\name)
.endm
486
# Instantiate the external and I/O interrupt handlers from the macro.
INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
489
490/*
491 * Load idle PSW.
492 */
ENTRY(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)	# save return address
	stg	%r3,__SF_EMPTY(%r15)		# PSW mask (first half of idle PSW)
	larl	%r1,psw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)		# PSW address = psw_idle_exit
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm		# no SMT -> skip counter store
	# raw opcode: presumably stcctm (store CPU counters) per the
	# label name — records MT cycle counters at idle entry
	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	stckf	__CLOCK_IDLE_ENTER(%r2)		# timestamp idle entry
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)		# load idle PSW (enabled wait)
.globl psw_idle_exit
psw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)
513
514/*
515 * Machine check handler routines
516 */
ENTRY(mcck_int_handler)
	stckf	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)		# validate bear
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	# CPU timer invalid: pick the most recent plausible timer value
	# (sys-enter / exit / last-update) and reload the timer from it
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	6f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,6f
	OUTSIDE	%r9,.Lsie_entry,.Lsie_leave,4f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
	j	5f
4:	CHKSTG	.Lmcck_panic
5:	larl	%r14,.Lstosm_tmp
	stosm	0(%r14),0x04		# turn dat on, keep irqs off
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
	j	.Lmcck_stack
#endif
6:	CHKSTG	.Lmcck_panic
	larl	%r14,.Lstosm_tmp
	stosm	0(%r14),0x04		# turn dat on, keep irqs off
	tmhh	%r8,0x0001		# interrupting from user ?
	jz	.Lmcck_stack
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
.Lmcck_stack:
	lg	%r15,__LC_MCCK_STACK	# switch to machine check stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)	# remember interrupted asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)	# copy r8-r15 from save area
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	cghi	%r2,0
	je	.Lmcck_return
	# non-zero return: run s390_handle_mcck on the kernel stack with
	# a copy of the assembled pt_regs
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r2,%r11
	lgr	%r15,%r1
	brasl	%r14,s390_handle_mcck
.Lmcck_return:
	lctlg	%c1,%c1,__PT_CR1(%r11)	# restore interrupted asce
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,.Lstop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,.Lthis_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f			# skip stopping ourselves (done last)
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
ENDPROC(mcck_int_handler)
644
ENTRY(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40	# set program parameter
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	la	%r15,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)	# restore ctl regs
0:	larl	%r15,.Lstosm_tmp
	stosm	0(%r15),0x04			# turn dat on, keep irqs off
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)	# clear stack frame
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lgf	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b				# retry while busy
3:	j	3b				# stopped; never reached
ENDPROC(restart_int_handler)
674
675	.section .kprobes.text, "ax"
676
677#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
678/*
679 * The synchronous or the asynchronous stack overflowed. We are dead.
680 * No need to properly save the registers, we are going to panic anyway.
681 * Setup a pt_regs so that show_trace can provide a good call trace.
682 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)	# %r14: save area (set by CHECK_*_STACK)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)	# terminate backchain
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow	# tail-call; does not return
ENDPROC(stack_overflow)
694#endif
695
	.section .data, "aw"
		.align	4
.Lstop_lock:	.long	0	# compare-and-swap lock: one CPU-stopper only
.Lthis_cpu:	.short	0	# CPU address stored by stap in .Lmcck_panic
.Lstosm_tmp:	.byte	0	# scratch byte for stosm (DAT-on with irqs off)
	.section .rodata, "a"
	# 64-bit native syscall table: one .quad per SYSCALL table entry.
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

	# 31-bit compat syscall table generated from the same header.
#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif
716