/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/threads.h>
#include <linux/linkage.h>

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>
#include <asm/feature-fixups.h>

/*
 * Offsets of the CPU registers stored in the save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 SPRGs */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_DBAT4	0x60
#define SL_IBAT4	0x68
#define SL_DBAT5	0x70
#define SL_IBAT5	0x78
#define SL_DBAT6	0x80
#define SL_IBAT6	0x88
#define SL_DBAT7	0x90
#define SL_IBAT7	0x98
#define SL_TB		0xa0
#define SL_R2		0xa8
#define SL_CR		0xac
#define SL_LR		0xb0
#define SL_R12		0xb4	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)

	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE


	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)

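	/* Point r11 at the save area */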
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

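	/* Save LR, CR, the stack pointer, r2 and the
	 * non-volatile GPRs r12-r31
	 */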
	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it: if TBU changed while
	 * we were reading TBL, the low half wrapped, so retry
	 */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)

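	/* CPUs with MMU_FTR_USE_HIGH_BATS have four extra BAT
	 * pairs (BAT4-7); save those as well
	 */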
BEGIN_MMU_FTR_SECTION
	mfspr	r4,SPRN_DBAT4U
	stw	r4,SL_DBAT4(r11)
	mfspr	r4,SPRN_DBAT4L
	stw	r4,SL_DBAT4+4(r11)
	mfspr	r4,SPRN_DBAT5U
	stw	r4,SL_DBAT5(r11)
	mfspr	r4,SPRN_DBAT5L
	stw	r4,SL_DBAT5+4(r11)
	mfspr	r4,SPRN_DBAT6U
	stw	r4,SL_DBAT6(r11)
	mfspr	r4,SPRN_DBAT6L
	stw	r4,SL_DBAT6+4(r11)
	mfspr	r4,SPRN_DBAT7U
	stw	r4,SL_DBAT7(r11)
	mfspr	r4,SPRN_DBAT7L
	stw	r4,SL_DBAT7+4(r11)
	mfspr	r4,SPRN_IBAT4U
	stw	r4,SL_IBAT4(r11)
	mfspr	r4,SPRN_IBAT4L
	stw	r4,SL_IBAT4+4(r11)
	mfspr	r4,SPRN_IBAT5U
	stw	r4,SL_IBAT5(r11)
	mfspr	r4,SPRN_IBAT5L
	stw	r4,SL_IBAT5+4(r11)
	mfspr	r4,SPRN_IBAT6U
	stw	r4,SL_IBAT6(r11)
	mfspr	r4,SPRN_IBAT6L
	stw	r4,SL_IBAT6+4(r11)
	mfspr	r4,SPRN_IBAT7U
	stw	r4,SL_IBAT7(r11)
	mfspr	r4,SPRN_IBAT7L
	stw	r4,SL_IBAT7+4(r11)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)

#if 0
	/* Back up various CPU configuration state */
	bl	__save_cpu_setup
#endif
	/* Call the low-level suspend stuff (we should probably have
	 * made a stackframe...)
	 */
	bl	swsusp_save

	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr


/* Resume code */
_GLOBAL(swsusp_arch_resume)

#ifdef CONFIG_ALTIVEC
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * be unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. G5 will need a better approach, possibly a
	 * small temporary hash table filled with large mappings;
	 * disabling the MMU completely isn't a good option for
	 * performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 in this case; we should investigate moving
	 * BATs for these CPUs.)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync

	/* Load the pointer to the list of pages to copy into r10 */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache-efficient */
1:
	tophys(r3,r10)
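	/* 256 iterations of 16 bytes each copy one 4KB page */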
	li	r0,256
	mtctr	r0
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
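	/* Walk the list until pbe_next is NULL */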
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b

	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
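	/* Load one word from each of 0x20000 32-byte lines (4MB)
	 * starting at 0, displacing the previous L1 contents
	 */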
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync

	/* OK, we are now running with the kernel data of the old
	 * kernel fully restored, so we can reach the save area
	 * easily. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical.
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
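	/* Translation is still off, so use the physical address */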
	tophys(r11,r11)

#if 0
	/* Restore various CPU configuration state */
	bl	__restore_cpu_setup
#endif
	/* Restore the BATs and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are running out of those BATs,
	 * but first, our code is probably in the icache, and we are
	 * writing the same values back into the BATs, so that should
	 * be fine, though a better solution will have to be found
	 * long-term
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
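	/* Restore the four SPRGs */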
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
BEGIN_MMU_FTR_SECTION
	lwz	r4,SL_DBAT4(r11)
	mtspr	SPRN_DBAT4U,r4
	lwz	r4,SL_DBAT4+4(r11)
	mtspr	SPRN_DBAT4L,r4
	lwz	r4,SL_DBAT5(r11)
	mtspr	SPRN_DBAT5U,r4
	lwz	r4,SL_DBAT5+4(r11)
	mtspr	SPRN_DBAT5L,r4
	lwz	r4,SL_DBAT6(r11)
	mtspr	SPRN_DBAT6U,r4
	lwz	r4,SL_DBAT6+4(r11)
	mtspr	SPRN_DBAT6L,r4
	lwz	r4,SL_DBAT7(r11)
	mtspr	SPRN_DBAT7U,r4
	lwz	r4,SL_DBAT7+4(r11)
	mtspr	SPRN_DBAT7L,r4
	lwz	r4,SL_IBAT4(r11)
	mtspr	SPRN_IBAT4U,r4
	lwz	r4,SL_IBAT4+4(r11)
	mtspr	SPRN_IBAT4L,r4
	lwz	r4,SL_IBAT5(r11)
	mtspr	SPRN_IBAT5U,r4
	lwz	r4,SL_IBAT5+4(r11)
	mtspr	SPRN_IBAT5L,r4
	lwz	r4,SL_IBAT6(r11)
	mtspr	SPRN_IBAT6U,r4
	lwz	r4,SL_IBAT6+4(r11)
	mtspr	SPRN_IBAT6L,r4
	lwz	r4,SL_IBAT7(r11)
	mtspr	SPRN_IBAT7U,r4
	lwz	r4,SL_IBAT7+4(r11)
	mtspr	SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
#endif

	/* Flush all TLBs: invalidate one EA in each 4KB page of the
	 * low 256MB, enough to cover every TLB set on these CPUs
	 */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync

	/* Restore the MSR and turn the MMU back on */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore TB: zero TBL first so TBU cannot tick over
	 * while both halves are being reloaded
	 */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4

	/* Kick decrementer */
	li	r0,1
	mtdec	r0

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	// XXX Note: we don't really need to call swsusp_resume

	li	r3,0
	blr
_ASM_NOKPROBE_SYMBOL(swsusp_arch_resume)

/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
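/* Return to the caller's LR with the MSR value passed in r3,
 * switching translation back on via rfi.
 */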
SYM_FUNC_START_LOCAL(turn_on_mmu)
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi
_ASM_NOKPROBE_SYMBOL(turn_on_mmu)
SYM_FUNC_END(turn_on_mmu)