#ifndef _M68K_CACHEFLUSH_H
#define _M68K_CACHEFLUSH_H

#include <linux/mm.h>

/* cache code */
#define FLUSH_I_AND_D	(0x00000808)
#define FLUSH_I		(0x00000008)

/*
 * Cache handling functions
 */

static inline void flush_icache(void)
{
	if (CPU_IS_040_OR_060)
		asm volatile (	"nop\n"
			"	.chip	68040\n"
			"	cpusha	%bc\n"
			"	.chip	68k");
	else {
		unsigned long tmp;
		asm volatile (	"movec	%%cacr,%0\n"
			"	or.w	%1,%0\n"
			"	movec	%0,%%cacr"
			: "=&d" (tmp)
			: "id" (FLUSH_I));
	}
}
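
/*
 * Illustrative usage sketch, not part of this header: code that has
 * just written instructions into memory (say a hypothetical trampoline
 * buffer `tramp') flushes the icache before jumping there, so the CPU
 * cannot execute stale cache lines:
 *
 *	memcpy(tramp, insns, insn_len);
 *	flush_icache();
 */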

/*
 * Invalidate the cache over the specified memory range: the range
 * starts at the given physical address and covers `len' bytes.
 */
extern void cache_clear(unsigned long paddr, int len);
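
/*
 * Illustrative sketch, assuming a hypothetical driver: once a device
 * has DMAed into a receive buffer, invalidate the lines covering it so
 * the CPU reads the fresh data from RAM rather than stale cache:
 *
 *	(wait for the DMA read into buf_phys to complete)
 *	cache_clear(buf_phys, buf_len);
 */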

/*
 * Push any dirty cache lines in the specified memory range back to
 * RAM: the range starts at the given physical address and covers
 * `len' bytes.
 */
extern void cache_push(unsigned long paddr, int len);
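
/*
 * Illustrative sketch, the mirror image of the above: before a device
 * DMA-reads a buffer the CPU has just filled, push the dirty lines to
 * RAM so the device sees the new contents (buf_virt, buf_phys and
 * buf_len are hypothetical):
 *
 *	memcpy(buf_virt, data, buf_len);
 *	cache_push(buf_phys, buf_len);
 *	(start the DMA write from buf_phys)
 */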

/*
 * Push and invalidate the pages covering the specified user virtual
 * memory range.
 */
extern void cache_push_v(unsigned long vaddr, int len);
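
/*
 * Illustrative sketch: the virtual-address variant suits ranges known
 * only by their user mapping, e.g. after patching a process's code at
 * a hypothetical address `uaddr':
 *
 *	cache_push_v(uaddr, patch_len);
 */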

/* This is needed whenever the virtual mapping of the current
   process changes.  */
#define __flush_cache_all()					\
({								\
	if (CPU_IS_040_OR_060)					\
		__asm__ __volatile__("nop\n\t"			\
				     ".chip 68040\n\t"		\
				     "cpusha %dc\n\t"		\
				     ".chip 68k");		\
	else {							\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define __flush_cache_030()					\
({								\
	if (CPU_IS_020_OR_030) {				\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define flush_cache_all() __flush_cache_all()

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()
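
/*
 * Illustrative note: the core VM invokes these hooks around vmalloc
 * mapping changes; on m68k both simply degrade to a full flush, so a
 * caller mapping pages into [start, end) effectively does:
 *
 *	flush_cache_vmap(start, end);	(expands to flush_cache_all())
 */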

static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_030();
}

#define flush_cache_dup_mm(mm)			flush_cache_mm(mm)

/* flush_cache_range/flush_cache_page used to be macros to avoid
   a dependency on linux/mm.h, which includes this file... */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
static inline void __flush_page_to_ram(void *vaddr)
{
	if (CPU_IS_040_OR_060) {
		__asm__ __volatile__("nop\n\t"
				     ".chip 68040\n\t"
				     "cpushp %%bc,(%0)\n\t"
				     ".chip 68k"
				     : : "a" (__pa(vaddr)));
	} else {
		unsigned long _tmp;
		__asm__ __volatile__("movec %%cacr,%0\n\t"
				     "orw %1,%0\n\t"
				     "movec %0,%%cacr"
				     : "=&d" (_tmp)
				     : "di" (FLUSH_I));
	}
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page)		__flush_page_to_ram(page_address(page))
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))
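
/*
 * Illustrative sketch, assuming filesystem or driver code: after the
 * kernel writes into a page-cache page that user space may also have
 * mapped, push it to RAM so every alias observes the update:
 *
 *	memcpy(page_address(page), src, len);
 *	flush_dcache_page(page);
 */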

extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
				    unsigned long addr, int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);

static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
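
/*
 * Illustrative note: copy_to_user_page() is the pattern used when the
 * kernel writes into another process's page, e.g. a debugger planting
 * a breakpoint via ptrace; the trailing icache flush matters because
 * the copied bytes may be instructions (bkpt_insn is hypothetical):
 *
 *	copy_to_user_page(vma, page, vaddr, kaddr,
 *			  &bkpt_insn, sizeof(bkpt_insn));
 */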

#endif /* _M68K_CACHEFLUSH_H */