/* setjmp for PowerPC.
   Copyright (C) 1995-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <stap-probe.h>
#define _ASM
#ifdef __NO_VMX__
# include <novmxsetjmp.h>
#else
# include <jmpbuf-offsets.h>
#endif

	.machine	"altivec"
ENTRY (__sigsetjmp_symbol)

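	/* Save the caller's stack pointer (r1) at JB_GPR1.  When pointer
	   mangling is enabled, the value is encrypted first so the raw
	   stack pointer never appears in the jump buffer.  */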
#ifdef PTR_MANGLE
	mr   r5,r1
	PTR_MANGLE(r5, r6)
	stw  r5,(JB_GPR1*4)(3)
#else
	stw  r1,(JB_GPR1*4)(3)
#endif
	mflr r0
	/* setjmp probe expects longjmp first argument (4@3), second argument
	   (-4@4), and target address (4@0), respectively.  */
	LIBC_PROBE (setjmp, 3, 4@3, -4@4, 4@0)
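	/* Save the call-preserved state: GPRs r14-r31 and FPRs fp14-fp31,
	   plus the (possibly mangled) link register at JB_LR and the
	   condition register at JB_CR; the integer and floating-point
	   stores are interleaved below.  */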
	stw  r14,((JB_GPRS+0)*4)(3)
	stfd fp14,((JB_FPRS+0*2)*4)(3)
#ifdef PTR_MANGLE
	PTR_MANGLE2 (r0, r6)
#endif
	stw  r0,(JB_LR*4)(3)
	stw  r15,((JB_GPRS+1)*4)(3)
	stfd fp15,((JB_FPRS+1*2)*4)(3)
	mfcr r0
	stw  r16,((JB_GPRS+2)*4)(3)
	stfd fp16,((JB_FPRS+2*2)*4)(3)
	stw  r0,(JB_CR*4)(3)
	stw  r17,((JB_GPRS+3)*4)(3)
	stfd fp17,((JB_FPRS+3*2)*4)(3)
	stw  r18,((JB_GPRS+4)*4)(3)
	stfd fp18,((JB_FPRS+4*2)*4)(3)
	stw  r19,((JB_GPRS+5)*4)(3)
	stfd fp19,((JB_FPRS+5*2)*4)(3)
	stw  r20,((JB_GPRS+6)*4)(3)
	stfd fp20,((JB_FPRS+6*2)*4)(3)
	stw  r21,((JB_GPRS+7)*4)(3)
	stfd fp21,((JB_FPRS+7*2)*4)(3)
	stw  r22,((JB_GPRS+8)*4)(3)
	stfd fp22,((JB_FPRS+8*2)*4)(3)
	stw  r23,((JB_GPRS+9)*4)(3)
	stfd fp23,((JB_FPRS+9*2)*4)(3)
	stw  r24,((JB_GPRS+10)*4)(3)
	stfd fp24,((JB_FPRS+10*2)*4)(3)
	stw  r25,((JB_GPRS+11)*4)(3)
	stfd fp25,((JB_FPRS+11*2)*4)(3)
	stw  r26,((JB_GPRS+12)*4)(3)
	stfd fp26,((JB_FPRS+12*2)*4)(3)
	stw  r27,((JB_GPRS+13)*4)(3)
	stfd fp27,((JB_FPRS+13*2)*4)(3)
	stw  r28,((JB_GPRS+14)*4)(3)
	stfd fp28,((JB_FPRS+14*2)*4)(3)
	stw  r29,((JB_GPRS+15)*4)(3)
	stfd fp29,((JB_FPRS+15*2)*4)(3)
	stw  r30,((JB_GPRS+16)*4)(3)
	stfd fp30,((JB_FPRS+16*2)*4)(3)
	stw  r31,((JB_GPRS+17)*4)(3)
	stfd fp31,((JB_FPRS+17*2)*4)(3)
#ifndef __NO_VMX__
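	/* If the processor supports AltiVec, also save VRSAVE and the
	   non-volatile vector registers v20-v31.  Load the AT_HWCAP word
	   (via _rtld_global_ro, _rtld_local_ro inside ld.so, or _dl_hwcap)
	   and test PPC_FEATURE_HAS_ALTIVEC.  */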
# ifdef PIC
	mflr    r6
	cfi_register(lr,r6)
	SETUP_GOT_ACCESS(r5,got_label)
	addis	r5,r5,_GLOBAL_OFFSET_TABLE_-got_label@ha
	addi	r5,r5,_GLOBAL_OFFSET_TABLE_-got_label@l
	mtlr	r6
	cfi_same_value (lr)
#  ifdef SHARED
#   if IS_IN (rtld)
	/* Inside ld.so we use the local alias to avoid runtime GOT
	   relocations.  */
	lwz     r5,_rtld_local_ro@got(r5)
#   else
	lwz     r5,_rtld_global_ro@got(r5)
#   endif
	lwz     r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET+LOWORD(r5)
#  else
	lwz     r5,_dl_hwcap@got(r5)
	lwz     r5,LOWORD(r5)
#  endif
# else
	lis	r6,(_dl_hwcap+LOWORD)@ha
	lwz     r5,(_dl_hwcap+LOWORD)@l(r6)
# endif
	andis.	r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
	beq	L(no_vmx)
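	/* Point r5 at the VR save area, save VRSAVE, and check whether the
	   area is 16-byte aligned.  stvx ignores the low four address
	   bits, so a misaligned buffer must take the permute path below.  */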
	la	r5,((JB_VRS)*4)(3)
	andi.	r6,r5,0xf
	mfspr	r0,VRSAVE
	stw	r0,((JB_VRSAVE)*4)(3)
	addi	r6,r5,16
	beq+	L(aligned_save_vmx)

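	/* Misaligned buffer: lvsr/lvsl build complementary permute control
	   vectors from the low bits of r5.  v0 merges each pair of adjacent
	   source VRs into an aligned 16-byte store; v1 rotates the existing
	   memory contents at the two ends so the bytes outside the VR area
	   are written back unchanged.  */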
	lvsr	v0,0,r5
	lvsl	v1,0,r5
	addi	r6,r5,-16

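	/* Store one VR of the misaligned sequence: advance the alternate
	   pointer by 32, merge the previous and current VRs through the
	   store-permute vector, and store the resulting aligned quadword.
	   r5 and r6 are used alternately so consecutive stores land in
	   consecutive 16-byte slots.  */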
# define save_misaligned_vmx(savevr,prevvr,shiftvr,tmpvr,savegpr,addgpr) \
	addi	addgpr,addgpr,32;					 \
	vperm	tmpvr,prevvr,savevr,shiftvr;				 \
	stvx	tmpvr,0,savegpr

	/*
	 * We have to be careful not to corrupt the data below v20 and
	 * above v31. To keep things simple we just rotate both ends in
	 * the opposite direction to our main permute so we can use
	 * the common macro.
	 */

	/* load and rotate data below v20 */
	lvx	v2,0,r5
	vperm	v2,v2,v2,v1
	save_misaligned_vmx(v20,v2,v0,v3,r5,r6)
	save_misaligned_vmx(v21,v20,v0,v3,r6,r5)
	save_misaligned_vmx(v22,v21,v0,v3,r5,r6)
	save_misaligned_vmx(v23,v22,v0,v3,r6,r5)
	save_misaligned_vmx(v24,v23,v0,v3,r5,r6)
	save_misaligned_vmx(v25,v24,v0,v3,r6,r5)
	save_misaligned_vmx(v26,v25,v0,v3,r5,r6)
	save_misaligned_vmx(v27,v26,v0,v3,r6,r5)
	save_misaligned_vmx(v28,v27,v0,v3,r5,r6)
	save_misaligned_vmx(v29,v28,v0,v3,r6,r5)
	save_misaligned_vmx(v30,v29,v0,v3,r5,r6)
	save_misaligned_vmx(v31,v30,v0,v3,r6,r5)
	/* load and rotate data above v31 */
	lvx	v2,0,r6
	vperm	v2,v2,v2,v1
	save_misaligned_vmx(v2,v31,v0,v3,r5,r6)

	b	L(no_vmx)

L(aligned_save_vmx):
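	/* Aligned buffer: store v20-v31 with plain stvx, alternating
	   between r5 and r6 (which start 16 bytes apart) and advancing
	   each by 32 so the stores fill consecutive 16-byte slots.  */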
	stvx	20,0,r5
	addi	r5,r5,32
	stvx	21,0,r6
	addi	r6,r6,32
	stvx	22,0,r5
	addi	r5,r5,32
	stvx	23,0,r6
	addi	r6,r6,32
	stvx	24,0,r5
	addi	r5,r5,32
	stvx	25,0,r6
	addi	r6,r6,32
	stvx	26,0,r5
	addi	r5,r5,32
	stvx	27,0,r6
	addi	r6,r6,32
	stvx	28,0,r5
	addi	r5,r5,32
	stvx	29,0,r6
	addi	r6,r6,32
	stvx	30,0,r5
	stvx	31,0,r6
L(no_vmx):
#endif
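	/* Tail-call __sigjmp_save to handle the signal mask: it saves the
	   mask in the jump buffer if requested and returns 0.  */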
	b __sigjmp_save_symbol@local
END (__sigsetjmp_symbol)