1/*
2 *  Parisc tlb and cache flushing support
3 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
4 *
5 *    This program is free software; you can redistribute it and/or modify
6 *    it under the terms of the GNU General Public License as published by
7 *    the Free Software Foundation; either version 2, or (at your option)
8 *    any later version.
9 *
10 *    This program is distributed in the hope that it will be useful,
11 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
12 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 *    GNU General Public License for more details.
14 *
15 *    You should have received a copy of the GNU General Public License
16 *    along with this program; if not, write to the Free Software
17 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20/*
21 * NOTE: fdc,fic, and pdc instructions that use base register modification
22 *       should only use index and base registers that are not shadowed,
23 *       so that the fast path emulation in the non access miss handler
24 *       can be used.
25 */
26
27#ifdef __LP64__
28#define ADDIB   addib,*
29#define CMPB    cmpb,*
30#define ANDCM   andcm,*
31
32	.level 2.0w
33#else
34#define ADDIB   addib,
35#define CMPB    cmpb,
36#define ANDCM   andcm
37
38	.level 2.0
39#endif
40
41#include <asm/assembly.h>
42#include <asm/psw.h>
43#include <asm/pgtable.h>
44#include <asm/cache.h>
45
46	.text
47	.align 128
48
	.export flush_tlb_all_local,code

/*
 * void flush_tlb_all_local(void)
 *
 * Purge the entire local instruction and data TLBs by stepping
 * pitlbe/pdtlbe over the space/offset geometry recorded in the
 * cache_info structure.  The purge runs in real mode with
 * interruptions disabled; the caller's I-bit (saved in %r19) is
 * restored on the way back to virtual mode.
 *
 * Clobbers: %r1, %r19-%r22, %r28, %r29, %r31, %arg0-%arg3, %sr1, PSW.
 */
flush_tlb_all_local:
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb. Also, there needs to be no intervening
	 * tlb operations, e.g. tlb misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 */

	/*
	 * Once again, we do the rfi dance ... some day we need to
	 * examine all of our uses of this type of code and see what
	 * can be consolidated.
	 */

	rsm     PSW_SM_I,%r19      /* save I-bit; relied upon translation! */
	nop                        /* nops to let the PSW change settle */
	nop
	nop
	nop
	nop
	nop
	nop

	rsm     PSW_SM_Q,%r0       /* Turn off Q bit to load iia queue */
	ldil    L%REAL_MODE_PSW, %r1
	ldo     R%REAL_MODE_PSW(%r1), %r1
	mtctl	%r1, %cr22         /* IPSW for the rfi below */
	mtctl	%r0, %cr17         /* IIASQ head and tail = space 0 */
	mtctl	%r0, %cr17
	ldil    L%PA(1f),%r1       /* IIAOQ = physical address of 1f */
	ldo     R%PA(1f)(%r1),%r1
	mtctl	%r1, %cr18         /* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18         /* IIAOQ tail */
	rfi                        /* resume at 1f in real mode */
	nop

1:      ldil            L%PA(cache_info),%r1
	ldo             R%PA(cache_info)(%r1),%r1

	/* Flush Instruction Tlb */

	LDREG           ITLB_SID_BASE(%r1),%r20
	LDREG           ITLB_SID_STRIDE(%r1),%r21
	LDREG           ITLB_SID_COUNT(%r1),%r22
	LDREG           ITLB_OFF_BASE(%r1),%arg0
	LDREG           ITLB_OFF_STRIDE(%r1),%arg1
	LDREG           ITLB_OFF_COUNT(%r1),%arg2
	LDREG           ITLB_LOOP(%r1),%arg3

	ADDIB=          -1,%arg3,fitoneloop     /* Preadjust and test */
	movb,<,n        %arg3,%r31,fitdone      /* If loop < 0, skip */
	copy            %arg0,%r28              /* Init base addr */

fitmanyloop:                                    /* Loop if LOOP >= 2 */
	mtsp            %r20,%sr1
	add             %r21,%r20,%r20          /* increment space */
	copy            %arg2,%r29              /* Init middle loop count */

fitmanymiddle:                                  /* Loop if LOOP >= 2 */
	ADDIB>          -1,%r31,fitmanymiddle   /* Adjusted inner loop decr */
	pitlbe          0(%sr1,%r28)
	pitlbe,m        %arg1(%sr1,%r28)        /* Last pitlbe and addr adjust */
	ADDIB>          -1,%r29,fitmanymiddle   /* Middle loop decr */
	copy            %arg3,%r31              /* Re-init inner loop count */

	movb,tr         %arg0,%r28,fitmanyloop  /* Re-init base addr */
	ADDIB<=,n       -1,%r22,fitdone         /* Outer loop count decr */

fitoneloop:                                     /* Loop if LOOP = 1 */
	mtsp            %r20,%sr1
	copy            %arg0,%r28              /* init base addr */
	copy            %arg2,%r29              /* init middle loop count */

fitonemiddle:                                   /* Loop if LOOP = 1 */
	ADDIB>          -1,%r29,fitonemiddle    /* Middle loop count decr */
	pitlbe,m        %arg1(%sr1,%r28)        /* pitlbe for one loop */

	ADDIB>          -1,%r22,fitoneloop      /* Outer loop count decr */
	add             %r21,%r20,%r20          /* increment space */

fitdone:

	/* Flush Data Tlb -- same loop structure as the itlb flush above */

	LDREG           DTLB_SID_BASE(%r1),%r20
	LDREG           DTLB_SID_STRIDE(%r1),%r21
	LDREG           DTLB_SID_COUNT(%r1),%r22
	LDREG           DTLB_OFF_BASE(%r1),%arg0
	LDREG           DTLB_OFF_STRIDE(%r1),%arg1
	LDREG           DTLB_OFF_COUNT(%r1),%arg2
	LDREG           DTLB_LOOP(%r1),%arg3

	ADDIB=          -1,%arg3,fdtoneloop     /* Preadjust and test */
	movb,<,n        %arg3,%r31,fdtdone      /* If loop < 0, skip */
	copy            %arg0,%r28              /* Init base addr */

fdtmanyloop:                                    /* Loop if LOOP >= 2 */
	mtsp            %r20,%sr1
	add             %r21,%r20,%r20          /* increment space */
	copy            %arg2,%r29              /* Init middle loop count */

fdtmanymiddle:                                  /* Loop if LOOP >= 2 */
	ADDIB>          -1,%r31,fdtmanymiddle   /* Adjusted inner loop decr */
	pdtlbe          0(%sr1,%r28)
	pdtlbe,m        %arg1(%sr1,%r28)        /* Last pdtlbe and addr adjust */
	ADDIB>          -1,%r29,fdtmanymiddle   /* Middle loop decr */
	copy            %arg3,%r31              /* Re-init inner loop count */

	movb,tr         %arg0,%r28,fdtmanyloop  /* Re-init base addr */
	ADDIB<=,n       -1,%r22,fdtdone         /* Outer loop count decr */

fdtoneloop:                                     /* Loop if LOOP = 1 */
	mtsp            %r20,%sr1
	copy            %arg0,%r28              /* init base addr */
	copy            %arg2,%r29              /* init middle loop count */

fdtonemiddle:                                   /* Loop if LOOP = 1 */
	ADDIB>          -1,%r29,fdtonemiddle    /* Middle loop count decr */
	pdtlbe,m        %arg1(%sr1,%r28)        /* pdtlbe for one loop */

	ADDIB>          -1,%r22,fdtoneloop      /* Outer loop count decr */
	add             %r21,%r20,%r20          /* increment space */

fdtdone:

	/* Switch back to virtual mode */

	rsm     PSW_SM_Q,%r0       /* clear Q bit to load iia queue */
	ldil	L%KERNEL_PSW, %r1
	ldo	R%KERNEL_PSW(%r1), %r1
	or      %r1,%r19,%r1    /* Set I bit if set on entry */
	mtctl	%r1, %cr22         /* IPSW = kernel PSW (+ saved I-bit) */
	mtctl	%r0, %cr17
	mtctl	%r0, %cr17
	ldil    L%(2f), %r1        /* IIAOQ = virtual address of 2f */
	ldo     R%(2f)(%r1), %r1
	mtctl	%r1, %cr18
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18
	rfi                        /* resume at 2f in virtual mode */
	nop

2:      bv      %r0(%r2)
	nop
	.exit

	.procend
203
	.export flush_instruction_cache_local,code
	.import cache_info,data

/*
 * void flush_instruction_cache_local(void)
 *
 * Flush the entire local instruction cache with fice, walking the
 * base/stride/count/loop geometry read from cache_info.  Note that,
 * unlike flush_data_cache_local below, this routine does not
 * disable interrupts.
 *
 * Clobbers: %r1, %r31, %arg0-%arg3, %sr1.
 */
flush_instruction_cache_local:
	.proc
	.callinfo NO_CALLS
	.entry

	mtsp            %r0,%sr1                /* fice through %sr1 = space 0 */
	ldil            L%cache_info,%r1
	ldo             R%cache_info(%r1),%r1

	/* Flush Instruction Cache */

	LDREG           ICACHE_BASE(%r1),%arg0
	LDREG           ICACHE_STRIDE(%r1),%arg1
	LDREG           ICACHE_COUNT(%r1),%arg2
	LDREG           ICACHE_LOOP(%r1),%arg3
	ADDIB=          -1,%arg3,fioneloop      /* Preadjust and test */
	movb,<,n        %arg3,%r31,fisync       /* If loop < 0, do sync */

fimanyloop:                                     /* Loop if LOOP >= 2 */
	ADDIB>          -1,%r31,fimanyloop      /* Adjusted inner loop decr */
	fice            0(%sr1,%arg0)
	fice,m          %arg1(%sr1,%arg0)       /* Last fice and addr adjust */
	movb,tr         %arg3,%r31,fimanyloop   /* Re-init inner loop count */
	ADDIB<=,n       -1,%arg2,fisync         /* Outer loop decr */

fioneloop:                                      /* Loop if LOOP = 1 */
	ADDIB>          -1,%arg2,fioneloop      /* Outer loop count decr */
	fice,m          %arg1(%sr1,%arg0)       /* Fice for one loop */

fisync:
	sync                                    /* wait for flushes to complete */
	bv      %r0(%r2)
	nop
	.exit

	.procend
243
	.export flush_data_cache_local,code
	.import cache_info,data

/*
 * void flush_data_cache_local(void)
 *
 * Flush the entire local data cache with fdce, walking the
 * base/stride/count/loop geometry read from cache_info.  Interrupts
 * are disabled for the duration of the flush (old I-bit saved in
 * %r22 and restored with mtsm before returning).
 *
 * Clobbers: %r1, %r22, %r31, %arg0-%arg3, %sr1.
 */
flush_data_cache_local:
	.proc
	.callinfo NO_CALLS
	.entry

	mtsp            %r0,%sr1                /* fdce through %sr1 = space 0 */
	ldil            L%cache_info,%r1
	ldo             R%cache_info(%r1),%r1

	/* Flush Data Cache */

	LDREG           DCACHE_BASE(%r1),%arg0
	LDREG           DCACHE_STRIDE(%r1),%arg1
	LDREG           DCACHE_COUNT(%r1),%arg2
	LDREG           DCACHE_LOOP(%r1),%arg3
	rsm             PSW_SM_I,%r22           /* disable interrupts, save I-bit */
	ADDIB=          -1,%arg3,fdoneloop      /* Preadjust and test */
	movb,<,n        %arg3,%r31,fdsync       /* If loop < 0, do sync */

fdmanyloop:                                     /* Loop if LOOP >= 2 */
	ADDIB>          -1,%r31,fdmanyloop      /* Adjusted inner loop decr */
	fdce            0(%sr1,%arg0)
	fdce,m          %arg1(%sr1,%arg0)       /* Last fdce and addr adjust */
	movb,tr         %arg3,%r31,fdmanyloop   /* Re-init inner loop count */
	ADDIB<=,n       -1,%arg2,fdsync         /* Outer loop decr */

fdoneloop:                                      /* Loop if LOOP = 1 */
	ADDIB>          -1,%arg2,fdoneloop      /* Outer loop count decr */
	fdce,m          %arg1(%sr1,%arg0)       /* Fdce for one loop */

fdsync:
	syncdma                                 /* drain DMA before sync */
	sync
	mtsm    %r22                            /* restore saved I-bit */
	bv      %r0(%r2)
	nop
	.exit

	.procend
286
	.export copy_user_page_asm,code

/*
 * void copy_user_page_asm(void *to, void *from)
 *
 * In:  %r26 (arg0) = to, %r25 (arg1) = from (kernel virtual addresses).
 * Copies one page as 64 iterations of 64 bytes, i.e. 4096 bytes.
 * NOTE(review): the page size is hard coded here (ldi 64) -- confirm
 * against PAGE_SIZE if configurations with larger pages are added.
 *
 * Clobbers: %r1, %r19-%r22; %r25/%r26 are advanced past the page.
 */
copy_user_page_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldi 64,%r1              /* 64 chunks of 64 bytes = one 4kB page */

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */


1:
	ldw 0(%r25),%r19
	ldw 4(%r25),%r20
	ldw 8(%r25),%r21
	ldw 12(%r25),%r22
	stw %r19,0(%r26)
	stw %r20,4(%r26)
	stw %r21,8(%r26)
	stw %r22,12(%r26)
	ldw 16(%r25),%r19
	ldw 20(%r25),%r20
	ldw 24(%r25),%r21
	ldw 28(%r25),%r22
	stw %r19,16(%r26)
	stw %r20,20(%r26)
	stw %r21,24(%r26)
	stw %r22,28(%r26)
	ldw 32(%r25),%r19
	ldw 36(%r25),%r20
	ldw 40(%r25),%r21
	ldw 44(%r25),%r22
	stw %r19,32(%r26)
	stw %r20,36(%r26)
	stw %r21,40(%r26)
	stw %r22,44(%r26)
	ldw 48(%r25),%r19
	ldw 52(%r25),%r20
	ldw 56(%r25),%r21
	ldw 60(%r25),%r22
	stw %r19,48(%r26)
	stw %r20,52(%r26)
	stw %r21,56(%r26)
	stw %r22,60(%r26)
	ldo 64(%r26),%r26       /* advance dest; src advanced in delay slot */
	ADDIB>  -1,%r1,1b
	ldo 64(%r25),%r25

	bv      %r0(%r2)
	nop
	.exit

	.procend
348
349#if (TMPALIAS_MAP_START >= 0x80000000UL)
350Warning TMPALIAS_MAP_START changed. If > 2 Gb, code in pacache.S is bogus
351#endif
352
353/*
354 * NOTE: Code in clear_user_page has a hard coded dependency on the
355 *       maximum alias boundary being 4 Mb. We've been assured by the
356 *       parisc chip designers that there will not ever be a parisc
357 *       chip with a larger alias boundary (Never say never :-) ).
358 *
359 *       Subtle: the dtlb miss handlers support the temp alias region by
360 *       "knowing" that if a dtlb miss happens within the temp alias
361 *       region it must have occurred while in clear_user_page. Since
362 *       this routine makes use of processor local translations, we
363 *       don't want to insert them into the kernel page table. Instead,
364 *       we load up some general registers (they need to be registers
365 *       which aren't shadowed) with the physical page numbers (preshifted
366 *       for tlb insertion) needed to insert the translations. When we
367 *       miss on the translation, the dtlb miss handler inserts the
368 *       translation into the tlb using these values:
369 *
370 *          %r26 physical page (shifted for tlb insert) of "to" translation
371 *          %r23 physical page (shifted for tlb insert) of "from" translation
372 */
373
#if 0

	/* DEAD CODE: compiled out with #if 0 -- alternate implementation
	 * of copy_user_page_asm that copies through tmpalias mappings
	 * instead of the kernel translation.  Kept for reference; see
	 * the rationale below.
	 */

	/*
	 * We can't do this since copy_user_page is used to bring in
	 * file data that might have instructions. Since the data would
	 * then need to be flushed out so the i-fetch can see it, it
	 * makes more sense to just copy through the kernel translation
	 * and flush it.
	 *
	 * I'm still keeping this around because it may be possible to
	 * use it if more information is passed into copy_user_page().
	 * Have to do some measurements to see if it is worthwhile to
	 * lobby for such a change.
	 */

	.export copy_user_page_asm,code

copy_user_page_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil    L%(__PAGE_OFFSET),%r1
	sub     %r26,%r1,%r26  /* 'to' virtual -> physical */
	sub     %r25,%r1,%r23  /* move physical addr into non shadowed reg */

	ldil    L%(TMPALIAS_MAP_START),%r28
#ifdef __LP64__
	extrd,u %r26,56,32,%r26 /* convert phys addr to tlb insert format */
	extrd,u %r23,56,32,%r23 /* convert phys addr to tlb insert format */
	depd    %r24,63,22,%r28 /* Form aliased virtual address 'to' */
	depdi   0,63,12,%r28    /* Clear any offset bits */
	copy    %r28,%r29
	depdi   1,41,1,%r29     /* Form aliased virtual address 'from' */
#else
	extrw,u %r26,24,25,%r26 /* convert phys addr to tlb insert format */
	extrw,u %r23,24,25,%r23 /* convert phys addr to tlb insert format */
	depw    %r24,31,22,%r28 /* Form aliased virtual address 'to' */
	depwi   0,31,12,%r28    /* Clear any offset bits */
	copy    %r28,%r29
	depwi   1,9,1,%r29      /* Form aliased virtual address 'from' */
#endif

	/* Purge any old translations */

	pdtlb   0(%r28)
	pdtlb   0(%r29)

	ldi 64,%r1              /* 64 chunks of 64 bytes = one 4kB page */

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */


1:
	ldw 0(%r29),%r19
	ldw 4(%r29),%r20
	ldw 8(%r29),%r21
	ldw 12(%r29),%r22
	stw %r19,0(%r28)
	stw %r20,4(%r28)
	stw %r21,8(%r28)
	stw %r22,12(%r28)
	ldw 16(%r29),%r19
	ldw 20(%r29),%r20
	ldw 24(%r29),%r21
	ldw 28(%r29),%r22
	stw %r19,16(%r28)
	stw %r20,20(%r28)
	stw %r21,24(%r28)
	stw %r22,28(%r28)
	ldw 32(%r29),%r19
	ldw 36(%r29),%r20
	ldw 40(%r29),%r21
	ldw 44(%r29),%r22
	stw %r19,32(%r28)
	stw %r20,36(%r28)
	stw %r21,40(%r28)
	stw %r22,44(%r28)
	ldw 48(%r29),%r19
	ldw 52(%r29),%r20
	ldw 56(%r29),%r21
	ldw 60(%r29),%r22
	stw %r19,48(%r28)
	stw %r20,52(%r28)
	stw %r21,56(%r28)
	stw %r22,60(%r28)
	ldo 64(%r28),%r28       /* advance dest; src advanced in delay slot */
	ADDIB>  -1,%r1,1b
	ldo 64(%r29),%r29

	bv      %r0(%r2)
	nop
	.exit

	.procend
#endif
477
	.export clear_user_page_asm,code

/*
 * void clear_user_page_asm(void *page, unsigned long vaddr)
 *
 * In:  %r26 (arg0) = kernel virtual address of the page,
 *      %r25 (arg1) = user virtual address (supplies the alias bits).
 * Zeroes one page through a temporary alias mapping.  %r26 is left
 * holding the preshifted physical page number that the dtlb miss
 * handler uses to insert the translation on the fly (see the big
 * comment above about the tmpalias region).
 * NOTE(review): page size hard coded as 64 x 64 bytes = 4kB.
 *
 * Clobbers: %r1, %r26 (converted to pfn), %r28.
 */
clear_user_page_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	tophys_r1 %r26          /* %r26 = physical address of the page */

	ldil    L%(TMPALIAS_MAP_START),%r28
#ifdef __LP64__
	extrd,u %r26,56,32,%r26 /* convert phys addr to tlb insert format */
	depd    %r25,63,22,%r28 /* Form aliased virtual address 'to' */
	depdi   0,63,12,%r28    /* Clear any offset bits */
#else
	extrw,u %r26,24,25,%r26 /* convert phys addr to tlb insert format */
	depw    %r25,31,22,%r28 /* Form aliased virtual address 'to' */
	depwi   0,31,12,%r28    /* Clear any offset bits */
#endif

	/* Purge any old translation */

	pdtlb   0(%r28)

	ldi 64,%r1              /* 64 chunks of 64 bytes = one 4kB page */

1:
	stw %r0,0(%r28)
	stw %r0,4(%r28)
	stw %r0,8(%r28)
	stw %r0,12(%r28)
	stw %r0,16(%r28)
	stw %r0,20(%r28)
	stw %r0,24(%r28)
	stw %r0,28(%r28)
	stw %r0,32(%r28)
	stw %r0,36(%r28)
	stw %r0,40(%r28)
	stw %r0,44(%r28)
	stw %r0,48(%r28)
	stw %r0,52(%r28)
	stw %r0,56(%r28)
	stw %r0,60(%r28)
	ADDIB>  -1,%r1,1b       /* advance pointer in the delay slot */
	ldo 64(%r28),%r28

	bv      %r0(%r2)
	nop
	.exit

	.procend
529
	.export flush_kernel_dcache_page

/*
 * void flush_kernel_dcache_page(void *page)
 *
 * In:  %r26 = kernel virtual address of the page.
 * fdc (flush data cache) every line of the page; the line stride is
 * read from dcache_stride.  The loop is unrolled 16x, with %r25 set
 * to base + PAGE_SIZE - stride so the CMPB terminates on the last
 * line.  NOTE(review): assumes lines-per-page is a multiple of 16.
 *
 * Clobbers: %r1, %r23, %r25, %r26.
 */
flush_kernel_dcache_page:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil    L%dcache_stride,%r1
	ldw     R%dcache_stride(%r1),%r23       /* %r23 = dcache line stride */

#ifdef __LP64__
	depdi,z 1,63-PAGE_SHIFT,1,%r25          /* %r25 = PAGE_SIZE */
#else
	depwi,z 1,31-PAGE_SHIFT,1,%r25
#endif
	add     %r26,%r25,%r25                  /* %r25 = one past the page */
	sub     %r25,%r23,%r25                  /* back off to the last line */


1:      fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	fdc,m   %r23(%r26)
	CMPB<<  %r26,%r25,1b                    /* loop while addr < last line */
	fdc,m   %r23(%r26)

	sync
	bv      %r0(%r2)
	nop
	.exit

	.procend
573
	.export purge_kernel_dcache_page

/*
 * void purge_kernel_dcache_page(void *page)
 *
 * In:  %r26 = kernel virtual address of the page.
 * Same structure as flush_kernel_dcache_page above, but uses pdc
 * (purge data cache -- discards lines without writeback) instead of
 * fdc.  NOTE(review): assumes lines-per-page is a multiple of 16.
 *
 * Clobbers: %r1, %r23, %r25, %r26.
 */
purge_kernel_dcache_page:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil    L%dcache_stride,%r1
	ldw     R%dcache_stride(%r1),%r23       /* %r23 = dcache line stride */

#ifdef __LP64__
	depdi,z 1,63-PAGE_SHIFT,1,%r25          /* %r25 = PAGE_SIZE */
#else
	depwi,z 1,31-PAGE_SHIFT,1,%r25
#endif
	add      %r26,%r25,%r25                 /* %r25 = one past the page */
	sub      %r25,%r23,%r25                 /* back off to the last line */

1:      pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	pdc,m   %r23(%r26)
	CMPB<<  %r26,%r25,1b                    /* loop while addr < last line */
	pdc,m   %r23(%r26)

	sync
	bv      %r0(%r2)
	nop
	.exit

	.procend
616
#if 0
	/* DEAD CODE: compiled out with #if 0.  Currently not used, but
	 * it still is a possible alternate solution: flush a page via a
	 * tmpalias mapping instead of the kernel translation.
	 */

	.export flush_alias_page

flush_alias_page:
	.proc
	.callinfo NO_CALLS
	.entry

	tophys_r1 %r26          /* %r26 = physical address of the page */

	ldil    L%(TMPALIAS_MAP_START),%r28
#ifdef __LP64__
	extrd,u %r26,56,32,%r26 /* convert phys addr to tlb insert format */
	depd    %r25,63,22,%r28 /* Form aliased virtual address 'to' */
	depdi   0,63,12,%r28    /* Clear any offset bits */
#else
	extrw,u %r26,24,25,%r26 /* convert phys addr to tlb insert format */
	depw    %r25,31,22,%r28 /* Form aliased virtual address 'to' */
	depwi   0,31,12,%r28    /* Clear any offset bits */
#endif

	/* Purge any old translation */

	pdtlb   0(%r28)

	ldil    L%dcache_stride,%r1
	ldw     R%dcache_stride(%r1),%r23       /* %r23 = dcache line stride */

#ifdef __LP64__
	depdi,z 1,63-PAGE_SHIFT,1,%r29          /* %r29 = PAGE_SIZE */
#else
	depwi,z 1,31-PAGE_SHIFT,1,%r29
#endif
	add      %r28,%r29,%r29                 /* %r29 = one past the page */
	sub      %r29,%r23,%r29                 /* back off to the last line */

1:      fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	fdc,m   %r23(%r28)
	CMPB<<  %r28,%r29,1b                    /* loop while addr < last line */
	fdc,m   %r23(%r28)

	sync
	bv      %r0(%r2)
	nop
	.exit

	.procend
#endif
682
	.export flush_user_dcache_range_asm

/*
 * void flush_user_dcache_range_asm(unsigned long start, unsigned long end)
 *
 * In:  %r26 = start, %r25 = end; user addresses accessed via %sr3.
 * Rounds start down to a dcache-line boundary, then fdc's one line
 * per iteration until the address reaches end.
 *
 * Clobbers: %r1, %r21, %r23, %r26.
 */
flush_user_dcache_range_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil    L%dcache_stride,%r1
	ldw     R%dcache_stride(%r1),%r23       /* %r23 = dcache line stride */
	ldo     -1(%r23),%r21                   /* %r21 = stride - 1 (mask) */
	ANDCM   %r26,%r21,%r26                  /* align start down to a line */

1:      CMPB<<,n %r26,%r25,1b                   /* loop while start < end */
	fdc,m   %r23(%sr3,%r26)                 /* flush line, advance start */

	sync
	bv      %r0(%r2)
	nop
	.exit

	.procend
704
	.export flush_kernel_dcache_range_asm

/*
 * void flush_kernel_dcache_range_asm(unsigned long start, unsigned long end)
 *
 * In:  %r26 = start, %r25 = end (kernel virtual addresses).
 * Rounds start down to a dcache-line boundary, then fdc's one line
 * per iteration until the address reaches end; also drains DMA.
 *
 * Clobbers: %r1, %r21, %r23, %r26.
 */
flush_kernel_dcache_range_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil    L%dcache_stride,%r1
	ldw     R%dcache_stride(%r1),%r23       /* %r23 = dcache line stride */
	ldo     -1(%r23),%r21                   /* %r21 = stride - 1 (mask) */
	ANDCM   %r26,%r21,%r26                  /* align start down to a line */

1:      CMPB<<,n %r26,%r25,1b                   /* loop while start < end */
	fdc,m   %r23(%r26)                      /* flush line, advance start */

	sync
	syncdma
	bv      %r0(%r2)
	nop
	.exit

	.procend
727
	.export flush_user_icache_range_asm

/*
 * void flush_user_icache_range_asm(unsigned long start, unsigned long end)
 *
 * In:  %r26 = start, %r25 = end; user addresses accessed via %sr3.
 * Rounds start down to an icache-line boundary, then fic's one line
 * per iteration until the address reaches end.
 *
 * Clobbers: %r1, %r21, %r23, %r26.
 */
flush_user_icache_range_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil    L%icache_stride,%r1
	ldw     R%icache_stride(%r1),%r23       /* %r23 = icache line stride */
	ldo     -1(%r23),%r21                   /* %r21 = stride - 1 (mask) */
	ANDCM   %r26,%r21,%r26                  /* align start down to a line */

1:      CMPB<<,n %r26,%r25,1b                   /* loop while start < end */
	fic,m   %r23(%sr3,%r26)                 /* flush line, advance start */

	sync
	bv      %r0(%r2)
	nop
	.exit

	.procend
749
	.export flush_kernel_icache_page

/*
 * void flush_kernel_icache_page(void *page)
 *
 * In:  %r26 = kernel virtual address of the page.
 * fic (flush instruction cache) every line of the page; the line
 * stride is read from icache_stride.  Loop is unrolled 16x, same
 * structure as flush_kernel_dcache_page.
 * NOTE(review): assumes lines-per-page is a multiple of 16.
 *
 * Clobbers: %r1, %r23, %r25, %r26.
 */
flush_kernel_icache_page:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil    L%icache_stride,%r1
	ldw     R%icache_stride(%r1),%r23       /* %r23 = icache line stride */

#ifdef __LP64__
	depdi,z 1,63-PAGE_SHIFT,1,%r25          /* %r25 = PAGE_SIZE */
#else
	depwi,z 1,31-PAGE_SHIFT,1,%r25
#endif
	add     %r26,%r25,%r25                  /* %r25 = one past the page */
	sub     %r25,%r23,%r25                  /* back off to the last line */


1:      fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	fic,m   %r23(%r26)
	CMPB<<  %r26,%r25,1b                    /* loop while addr < last line */
	fic,m   %r23(%r26)

	sync
	bv      %r0(%r2)
	nop
	.exit

	.procend
793
	.export flush_kernel_icache_range_asm

/*
 * void flush_kernel_icache_range_asm(unsigned long start, unsigned long end)
 *
 * In:  %r26 = start, %r25 = end (kernel virtual addresses).
 * Rounds start down to an icache-line boundary, then fic's one line
 * per iteration until the address reaches end.
 *
 * Clobbers: %r1, %r21, %r23, %r26.
 */
flush_kernel_icache_range_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil    L%icache_stride,%r1
	ldw     R%icache_stride(%r1),%r23       /* %r23 = icache line stride */
	ldo     -1(%r23),%r21                   /* %r21 = stride - 1 (mask) */
	ANDCM   %r26,%r21,%r26                  /* align start down to a line */

1:      CMPB<<,n %r26,%r25,1b                   /* loop while start < end */
	fic,m   %r23(%r26)                      /* flush line, advance start */

	sync
	bv      %r0(%r2)
	nop
	.exit

	.procend
815
	.align 128

	.export disable_sr_hashing_asm,code

/*
 * void disable_sr_hashing_asm(int srhash_type)
 *
 * In:  %r26 = SRHASH_PCXST, SRHASH_PCXL or SRHASH_PA20, selecting
 *      which CPU family's diagnose-register layout to use.
 * Disables space-register hashing by clearing the relevant enable
 * bits in the CPU diagnose registers.  The diagnose instructions
 * are emitted as raw .word values because the assembler has no
 * mnemonics for them.  The pokes are done in real mode via the
 * same rfi dance used by flush_tlb_all_local.
 *
 * Clobbers: %r1, %r28, PSW.
 */
disable_sr_hashing_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	/* Switch to real mode */

	ssm     0,%r0           /* relied upon translation! */
	nop                     /* nops to let the PSW change settle */
	nop
	nop
	nop
	nop
	nop
	nop

	rsm     (PSW_SM_Q|PSW_SM_I),%r0 /* disable Q&I to load the iia queue */
	ldil    L%REAL_MODE_PSW, %r1
	ldo     R%REAL_MODE_PSW(%r1), %r1
	mtctl	%r1, %cr22      /* IPSW for the rfi below */
	mtctl	%r0, %cr17      /* IIASQ head and tail = space 0 */
	mtctl	%r0, %cr17
	ldil    L%PA(1f),%r1    /* IIAOQ = physical address of 1f */
	ldo     R%PA(1f)(%r1),%r1
	mtctl	%r1, %cr18
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18
	rfi                     /* resume at 1f in real mode */
	nop

1:      cmpib,=,n SRHASH_PCXST,%r26,srdis_pcxs  /* dispatch on cpu type */
	cmpib,=,n SRHASH_PCXL,%r26,srdis_pcxl
	cmpib,=,n SRHASH_PA20,%r26,srdis_pa20
	b,n       srdis_done                    /* unknown type: do nothing */

srdis_pcxs:

	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */

	.word           0x141c1a00  /* mfdiag %dr0,%r28 */
	.word           0x141c1a00  /* must issue twice */
	depwi           0,18,1,%r28 /* Clear DHE (dcache hash enable) */
	depwi           0,20,1,%r28 /* Clear IHE (icache hash enable) */
	.word           0x141c1600  /* mtdiag %r28,%dr0 */
	.word           0x141c1600  /* must issue twice */
	b,n             srdis_done

srdis_pcxl:

	/* Disable Space Register Hashing for PCXL */

	.word           0x141c0600  /* mfdiag %dr0,%r28 */
	depwi           0,28,2,%r28 /* Clear DHASH_EN & IHASH_EN */
	.word           0x141c0240  /* mtdiag %r28,%dr0 */
	b,n             srdis_done

srdis_pa20:

	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+ */

	.word           0x144008bc  /* mfdiag %dr2,%r28 */
	depdi           0,54,1,%r28 /* clear DIAG_SPHASH_ENAB (bit 54) */
	.word           0x145c1840  /* mtdiag %r28,%dr2 */

srdis_done:

	/* Switch back to virtual mode */

	rsm     PSW_SM_Q,%r0           /* clear Q bit to load iia queue */
	ldil	L%KERNEL_PSW, %r1
	ldo	R%KERNEL_PSW(%r1), %r1
	mtctl	%r1, %cr22      /* IPSW = kernel PSW */
	mtctl	%r0, %cr17
	mtctl	%r0, %cr17
	ldil    L%(2f), %r1     /* IIAOQ = virtual address of 2f */
	ldo     R%(2f)(%r1), %r1
	mtctl	%r1, %cr18
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18
	rfi                     /* resume at 2f in virtual mode */
	nop

2:      bv      %r0(%r2)
	nop
	.exit

	.procend
907
908	.end
909