/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 *
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pgtable.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
 * page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
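/*
 * With a virtually-indexed D-cache whose way size exceeds PAGE_SIZE, a
 * physical page can be cached under several virtual "colors" (the low
 * DCACHE_ALIAS_MASK bits of the virtual address). This helper invalidates
 * stale lines at the page's kernel color when that color differs from the
 * user mapping at vaddr: through the direct mapping for lowmem pages, or
 * through a temporary TLBTEMP alias for highmem pages.
 */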
static inline void kmap_invalidate_coherent(struct page *page,
                                            unsigned long vaddr)
{
        if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
                unsigned long kvaddr;

                if (!PageHighMem(page)) {
                        kvaddr = (unsigned long)page_to_virt(page);

                        __invalidate_dcache_page(kvaddr);
                } else {
                        kvaddr = TLBTEMP_BASE_1 +
                                (page_to_phys(page) & DCACHE_ALIAS_MASK);

                        preempt_disable();
                        __invalidate_dcache_page_alias(kvaddr,
                                                       page_to_phys(page));
                        preempt_enable();
                }
        }
}

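/*
 * Return a kernel virtual address inside one of the TLBTEMP windows that
 * has the same cache color as the user address vaddr, and report the
 * page's physical address through *paddr. Accesses through the returned
 * address hit the same cache lines the user mapping will use.
 */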
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
                                    unsigned long vaddr, unsigned long *paddr)
{
        *paddr = page_to_phys(page);
        return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
}

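/*
 * Clear a page through an alias that matches the user's cache color, so
 * the zeroed lines land where the user mapping will look for them. Stale
 * lines at the kernel color are invalidated first, and PG_arch_1 marks
 * the page dirty for the deferred maintenance in update_mmu_cache_range().
 */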
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        unsigned long paddr;
        void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

        preempt_disable();
        kmap_invalidate_coherent(page, vaddr);
        set_bit(PG_arch_1, &page->flags);
        clear_page_alias(kvaddr, paddr);
        preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);

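/*
 * Copy a page through two temporary aliases, both colored like the user
 * address vaddr: TLBTEMP_BASE_1 for the destination and TLBTEMP_BASE_2
 * for the source, so the two mappings cannot collide. As above, the
 * destination is invalidated at its kernel color and marked dirty via
 * PG_arch_1.
 */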
void copy_user_highpage(struct page *dst, struct page *src,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned long dst_paddr, src_paddr;
        void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
                                          &dst_paddr);
        void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
                                          &src_paddr);

        preempt_disable();
        kmap_invalidate_coherent(dst, vaddr);
        set_bit(PG_arch_1, &dst->flags);
        copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
        preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * This routine is called any time the kernel writes to, or is about to
 * read from, a page-cache page.
 */

void flush_dcache_folio(struct folio *folio)
{
        struct address_space *mapping = folio_flush_mapping(folio);

        /*
         * If we have a mapping but the page is not mapped to user-space
         * yet, we simply mark this page dirty and defer flushing the
         * caches until update_mmu_cache_range().
         */

        if (mapping && !mapping_mapped(mapping)) {
                if (!test_bit(PG_arch_1, &folio->flags))
                        set_bit(PG_arch_1, &folio->flags);
                return;

        } else {
                unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
                unsigned long temp = folio_pos(folio);
                unsigned int i, nr = folio_nr_pages(folio);
                unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
                unsigned long virt;

                /*
                 * Flush the page in kernel space and user space.
                 * Note that we can omit that step if aliasing is not
                 * an issue, but we do have to synchronize I$ and D$
                 * if we have a mapping.
                 */

                if (!alias && !mapping)
                        return;

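                /*
                 * For each page: flush and invalidate the D$ at the
                 * kernel color (derived from phys), then at the expected
                 * user color (temp is based on the file offset, which
                 * normally determines where user space maps the page)
                 * when the two differ, and invalidate the I$ at the user
                 * color if the folio belongs to a mapping.
                 */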
                preempt_disable();
                for (i = 0; i < nr; i++) {
                        virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
                        __flush_invalidate_dcache_page_alias(virt, phys);

                        virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

                        if (alias)
                                __flush_invalidate_dcache_page_alias(virt, phys);

                        if (mapping)
                                __invalidate_icache_page_alias(virt, phys);
                        phys += PAGE_SIZE;
                        temp += PAGE_SIZE;
                }
                preempt_enable();
        }

        /* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_folio);

/*
 * For now, flush the whole cache. FIXME: flush only the given range?
 */

void local_flush_cache_range(struct vm_area_struct *vma,
                             unsigned long start, unsigned long end)
{
        __flush_invalidate_dcache_all();
        __invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
                            unsigned long pfn)
{
        /*
         * Note that we have to use the 'alias' address to avoid a
         * multi-hit TLB exception.
         */

        unsigned long phys = page_to_phys(pfn_to_page(pfn));
        unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

        preempt_disable();
        __flush_invalidate_dcache_page_alias(virt, phys);
        __invalidate_icache_page_alias(virt, phys);
        preempt_enable();
}
EXPORT_SYMBOL(local_flush_cache_page);

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

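/*
 * Called after PTEs for the range at addr have been installed. This is
 * where the cache maintenance recorded in PG_arch_1 is finally performed:
 * stale TLB entries are dropped first, then the D$ and I$ are brought
 * into the state the new mapping requires.
 */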
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
                            unsigned long addr, pte_t *ptep, unsigned int nr)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct folio *folio;
        unsigned int i;

        if (!pfn_valid(pfn))
                return;

        folio = page_folio(pfn_to_page(pfn));

        /* Invalidate old entries in TLBs */
        for (i = 0; i < nr; i++)
                flush_tlb_page(vma, addr + i * PAGE_SIZE);
        nr = folio_nr_pages(folio);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

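        /*
         * With aliasing, a set PG_arch_1 means the folio is dirty: write
         * back and invalidate the D$ at both the kernel color and the
         * user color, invalidate the I$ at the user color, then clear
         * the dirty flag.
         */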
        if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {
                unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
                unsigned long tmp;

                preempt_disable();
                for (i = 0; i < nr; i++) {
                        tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
                        __flush_invalidate_dcache_page_alias(tmp, phys);
                        tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
                        __flush_invalidate_dcache_page_alias(tmp, phys);
                        __invalidate_icache_page_alias(tmp, phys);
                        phys += PAGE_SIZE;
                }
                preempt_enable();

                clear_bit(PG_arch_1, &folio->flags);
        }
#else
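        /*
         * Without aliasing, a clear PG_arch_1 means I$ and D$ may still
         * be out of sync. For executable mappings, write back the D$ and
         * invalidate the I$ once, then set the flag to record that the
         * folio is clean.
         */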
        if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)
            && (vma->vm_flags & VM_EXEC) != 0) {
                for (i = 0; i < nr; i++) {
                        void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);
                        __flush_dcache_page((unsigned long)paddr);
                        __invalidate_icache_page((unsigned long)paddr);
                        kunmap_local(paddr);
                }
                set_bit(PG_arch_1, &folio->flags);
        }
#endif
}

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

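/*
 * Write into a user page through its kernel mapping (dst), e.g. on
 * behalf of ptrace. The user's alias is invalidated first so the user
 * cannot see stale lines; after the copy, the data is pushed out of the
 * kernel alias and the I$ is synchronized for executable mappings.
 */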
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        unsigned long phys = page_to_phys(page);
        unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

        /* Flush and invalidate user page if aliased. */

        if (alias) {
                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

                preempt_disable();
                __flush_invalidate_dcache_page_alias(t, phys);
                preempt_enable();
        }

        /* Copy data */

        memcpy(dst, src, len);

        /*
         * Flush and invalidate kernel page if aliased and synchronize
         * data and instruction caches for executable pages.
         */

        if (alias) {
                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

                preempt_disable();
                __flush_invalidate_dcache_range((unsigned long) dst, len);
                if ((vma->vm_flags & VM_EXEC) != 0)
                        __invalidate_icache_page_alias(t, phys);
                preempt_enable();

        } else if ((vma->vm_flags & VM_EXEC) != 0) {
                __flush_dcache_range((unsigned long) dst, len);
                __invalidate_icache_range((unsigned long) dst, len);
        }
}

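/*
 * Read-side counterpart of copy_to_user_page(): the kernel reads from a
 * user page through its kernel mapping (src). Only the user's alias
 * needs to be written back, so the read observes the user's most recent
 * stores.
 */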
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        unsigned long phys = page_to_phys(page);
        unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

        /*
         * Flush user page if aliased.
         * (Note: a simple flush would be sufficient.)
         */

        if (alias) {
                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

                preempt_disable();
                __flush_invalidate_dcache_page_alias(t, phys);
                preempt_enable();
        }

        memcpy(dst, src, len);
}

#endif