/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/export.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

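/* invtlb op 5: invalidate non-global (G=0) entries matching both the ASID and the VA */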
#define INVTLB_ADDR_GFALSE_AND_ASID	5

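/* Each page-table level is one page holding PAGE_SIZE / 8 eight-byte entries */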
#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)

	.macro tlb_do_page_fault, write
	SYM_FUNC_START(tlb_do_page_fault_\write)
	SAVE_ALL
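	/* Set up the call to do_page_fault(regs, write, address): a0 = pt_regs, a1 = \write, a2 = BADV */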
	csrrd		a2, LOONGARCH_CSR_BADV
	move		a0, sp
	REG_S		a2, sp, PT_BVADDR
	li.w		a1, \write
	la.abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
	SYM_FUNC_END(tlb_do_page_fault_\write)
	.endm

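	/* Generate tlb_do_page_fault_0 (read) and tlb_do_page_fault_1 (write) */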
	tlb_do_page_fault 0
	tlb_do_page_fault 1

SYM_FUNC_START(handle_tlb_protect)
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp
	move		a1, zero
	csrrd		a2, LOONGARCH_CSR_BADV
	REG_S		a2, sp, PT_BVADDR
	la.abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_tlb_protect)

SYM_FUNC_START(handle_tlb_load)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2
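	/* t0/t1/ra are stashed in scratch CSRs: the fast path never touches the stack */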

	/*
	 * The vmalloc handling is not in the hotpath: a negative BADV
	 * (bit 63 set) is a kernel address and is resolved through
	 * swapper_pg_dir instead of the per-process page table.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
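	/* t1 points at the PMD slot; load the PMD entry */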
	ld.d		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit (the
	 * rotate below moves it into the sign bit) and see if we need
	 * to jump to huge tlb processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_load

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2
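	/* t1 = &pte_table[pte_index(badv)], each entry 1 << _PTE_T_LOG2 bytes */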

#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load

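	/* Mark the PTE valid; on SMP the ll/sc pair retries on contention */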
	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_load
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
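	/* Clear bit 3 to address the even PTE of the pair feeding TLBELO0/TLBELO1 */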
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_load:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_load
#endif

	/* This is the entry point for handling a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
	ori		t0, ra, _PAGE_VALID
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_load
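	/* sc.d consumed t0 as the success flag; rebuild the PTE value */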
	ori		t0, ra, _PAGE_VALID
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, _PAGE_VALID
	st.d		t0, t1, 0
#endif
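	/*
	 * A stale base-size entry for this VA may still be cached:
	 * drop any non-global entry matching this ASID and VA.
	 */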
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: clear _PAGE_HUGE and move the Global bit down */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
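	/* csrxchg writes only the CSR bits selected by the mask in t0 (the PS field) */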
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

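	/* Reset default page size */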
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_load:
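	/* No usable PTE: restore ra and take the slow C path for a read fault */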
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_FUNC_END(handle_tlb_load)

SYM_FUNC_START(handle_tlb_store)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2
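	/*
	 * Same walk as handle_tlb_load, but the PTE must also be
	 * writable, and the dirty/modified bits are set on success.
	 */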

	/*
	 * The vmalloc handling is not in the hotpath: a negative BADV
	 * (bit 63 set) is a kernel address and is resolved through
	 * swapper_pg_dir instead of the per-process page table.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit (the
	 * rotate below moves it into the sign bit) and see if we need
	 * to jump to huge tlb processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_store

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
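	/* Fault unless both _PAGE_PRESENT and _PAGE_WRITE are set */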
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez		ra, nopage_tlb_store

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_store
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_store
#endif

	/* This is the entry point for handling a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_store
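	/* sc.d consumed t0 as the success flag; rebuild the PTE value */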
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: clear _PAGE_HUGE and move the Global bit down */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
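	/* No usable PTE: restore ra and take the slow C path for a write fault */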
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_store)

SYM_FUNC_START(handle_tlb_modify)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2
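	/*
	 * A modify exception means the entry exists but is not dirty:
	 * same walk as handle_tlb_store, checking _PAGE_WRITE only.
	 */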

	/*
	 * The vmalloc handling is not in the hotpath: a negative BADV
	 * (bit 63 set) is a kernel address and is resolved through
	 * swapper_pg_dir instead of the per-process page table.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit (the
	 * rotate below moves it into the sign bit) and see if we need
	 * to jump to huge tlb processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_modify

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
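	/* Fault unless the PTE is writable */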
	andi		ra, t0, _PAGE_WRITE
	beqz		ra, nopage_tlb_modify

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_modify
#endif

	/* This is the entry point for handling a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_WRITE
	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_modify
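	/* sc.d consumed t0 as the success flag; rebuild the PTE value */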
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: clear _PAGE_HUGE and move the Global bit down */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
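	/* No usable PTE: restore ra and take the slow C path for a write fault */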
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_modify)

SYM_FUNC_START(handle_tlb_refill)
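	/*
	 * Hardware TLB refill: runs in its own exception context, so
	 * only the dedicated TLBRSAVE scratch CSR is available. CSR.PGD
	 * reads as PGDL or PGDH depending on the sign of the bad VA.
	 */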
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
	csrrd		t0, LOONGARCH_CSR_PGD
	lddir		t0, t0, 3
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1
#endif
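	/* ldpte loads the even/odd PTEs of the pair into TLBRELO0/TLBRELO1 */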
	ldpte		t0, 0
	ldpte		t0, 1
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_FUNC_END(handle_tlb_refill)