/*
 * linux/include/asm-arm/proc-armv/cache.h
 *
 * Copyright (C) 1999-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/mman.h>

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 * Cache handling for 32-bit ARM processors.
 *
 * Note that on ARM, we have a more accurate specification than that
 * implied by Linux's "flush".  We therefore do not use "flush" here,
 * but instead use:
 *
 * clean:      the act of pushing dirty cache entries out to memory.
 * invalidate: the act of discarding data held within the cache,
 *             whether it is dirty or not.
 */

/*
 * Generic I + D cache
 */
#define flush_cache_all()						\
	do {								\
		cpu_cache_clean_invalidate_all();			\
	} while (0)

/* This is always called for current->mm */
#define flush_cache_mm(_mm)						\
	do {								\
		if ((_mm) == current->active_mm)			\
			cpu_cache_clean_invalidate_all();		\
	} while (0)

#define flush_cache_range(_mm,_start,_end)				\
	do {								\
		if ((_mm) == current->active_mm)			\
			cpu_cache_clean_invalidate_range((_start) & PAGE_MASK, \
						PAGE_ALIGN(_end), 1);	\
	} while (0)

#define flush_cache_page(_vma,_vmaddr)					\
	do {								\
		if ((_vma)->vm_mm == current->active_mm) {		\
			unsigned long _addr = (_vmaddr) & PAGE_MASK;	\
			cpu_cache_clean_invalidate_range(_addr,		\
				_addr + PAGE_SIZE,			\
				((_vma)->vm_flags & VM_EXEC));		\
		}							\
	} while (0)
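
/*
 * Illustrative sketch (not part of the original header): the generic
 * VM tears a mapping down by flushing cached user data first, editing
 * the page tables, and only then dropping the stale translations.
 * The function below is hypothetical and kept out of the build.
 */
#if 0	/* example only */
static inline void example_unmap_range(struct mm_struct *mm,
				       unsigned long start, unsigned long end)
{
	flush_cache_range(mm, start, end);	/* before the ptes go away */
	/* ... page table entries are cleared here ... */
	flush_tlb_range(mm, start, end);	/* after the ptes are gone  */
}
#endif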

/*
 * This flushes back any buffered write data.  We have to clean the entries
 * in the cache for this page.  This does not invalidate either I or D caches.
 *
 * Called from:
 * 1. mm/filemap.c:filemap_nopage
 * 2. mm/filemap.c:filemap_nopage
 *    [via do_no_page - ok]
 *
 * 3. mm/memory.c:break_cow
 *    [copy_cow_page doesn't do anything to the cache; insufficient cache
 *     handling.  Need to add flush_dcache_page() here]
 *
 * 4. mm/memory.c:do_swap_page
 *    [read_swap_cache_async doesn't do anything to the cache: insufficient
 *     cache handling.  Need to add flush_dcache_page() here]
 *
 * 5. mm/memory.c:do_anonymous_page
 *    [zero page, never written by kernel - ok]
 *
 * 6. mm/memory.c:do_no_page
 *    [we will be calling update_mmu_cache, which will catch on PG_dcache_dirty]
 *
 * 7. mm/shmem.c:shmem_nopage
 * 8. mm/shmem.c:shmem_nopage
 *    [via do_no_page - ok]
 *
 * 9. fs/exec.c:put_dirty_page
 *    [we call flush_dcache_page prior to this, which will flush out the
 *     kernel virtual addresses from the dcache - ok]
 */
static __inline__ void flush_page_to_ram(struct page *page)
{
	cpu_flush_ram_page(page_address(page));
}

/*
 * D cache only
 */

#define invalidate_dcache_range(_s,_e)	cpu_dcache_invalidate_range((_s),(_e))
#define clean_dcache_range(_s,_e)	cpu_dcache_clean_range((_s),(_e))
#define flush_dcache_range(_s,_e)	cpu_cache_clean_invalidate_range((_s),(_e),0)
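
/*
 * Illustrative sketch (not in the original header): a driver doing DMA
 * would typically clean the dcache over an outbound buffer before
 * starting the transfer, and invalidate an inbound buffer before
 * reading what the device wrote.  Both helpers are hypothetical and
 * kept out of the build.
 */
#if 0	/* example only */
static inline void example_dma_to_device(unsigned long buf, unsigned long len)
{
	/* push dirty lines out so the device sees current data */
	clean_dcache_range(buf, buf + len);
}

static inline void example_dma_from_device(unsigned long buf, unsigned long len)
{
	/* discard (possibly stale) lines so reads come from memory */
	invalidate_dcache_range(buf, buf + len);
}
#endif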

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page->mapping == NULL), or it has
 * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
 * then we _must_ always clean + invalidate the dcache entries associated
 * with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define mapping_mapped(map)	((map)->i_mmap || (map)->i_mmap_shared)

static inline void flush_dcache_page(struct page *page)
{
	if (page->mapping && !mapping_mapped(page->mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		unsigned long virt = (unsigned long)page_address(page);
		cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
	}
}
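
/*
 * Illustrative sketch (hypothetical, not compiled): after writing to a
 * page cache page through its kernel mapping, the writer is expected
 * to call flush_dcache_page() so that user mappings (or the deferred
 * clean in update_mmu_cache) see the new data.
 */
#if 0	/* example only */
static inline void example_fill_page(struct page *page,
				     const void *src, unsigned long len)
{
	memcpy(page_address(page), src, len);
	flush_dcache_page(page);
}
#endif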

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

#define clean_dcache_entry(_s)	cpu_dcache_clean_entry((unsigned long)(_s))

/*
 * This function is misnamed IMHO.  There are three places where it
 * is called, each of which is preceded immediately by a call to
 * flush_page_to_ram:
 *
 * 1. kernel/ptrace.c:access_one_page
 *    called after we have written to the kernel view of a user page.
 *    The user page has been expunged from the cache by flush_cache_page.
 *    [we don't need to do anything here if we add a call to
 *     flush_dcache_page]
 *
 * 2. mm/memory.c:do_swap_page
 *    called after we have (possibly) written to the kernel view of a
 *    user page, which has previously been removed (ie, has been through
 *    the swap cache).
 *    [if the flush_page_to_ram() conditions are satisfied, then ok]
 *
 * 3. mm/memory.c:do_no_page
 *    [if the flush_page_to_ram() conditions are satisfied, then ok]
 *
 * Invalidating the icache at the kernel's virtual page isn't really
 * going to do us much good, since we wouldn't have executed any
 * instructions there.
 */
#define flush_icache_page(vma,pg)	do { } while (0)

/*
 * I cache coherency stuff.
 *
 * This *is not* just icache.  It is to make data written to memory
 * consistent such that instructions fetched from the region are what
 * we expect.
 *
 * This generally means that we have to clean out the Dcache and write
 * buffers, and maybe flush the Icache in the specified range.
 */
#define flush_icache_range(_s,_e)					\
	do {								\
		cpu_icache_invalidate_range((_s), (_e));		\
	} while (0)
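
/*
 * Illustrative sketch (hypothetical, not compiled): anything that
 * writes instructions through the data side (a module loader, a
 * breakpoint writer) must call flush_icache_range() over the region
 * before the CPU executes from it.
 */
#if 0	/* example only */
static inline void example_install_code(unsigned long dst,
					const void *insns, unsigned long len)
{
	memcpy((void *)dst, insns, len);
	/* make the new instructions visible to instruction fetches */
	flush_icache_range(dst, dst + len);
}
#endif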

/*
 * TLB flushing.
 *
 *  - flush_tlb_all()			flushes all processes' TLBs
 *  - flush_tlb_mm(mm)			flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr)	flushes TLB for specified page
 *  - flush_tlb_range(mm, start, end)	flushes TLB for specified range of pages
 *
 * We drain the write buffer in here to ensure that the page tables in ram
 * are really up to date.  It is more efficient to do this here...
 */

/*
 * Notes:
 *  current->active_mm is the currently active memory description.
 *  current->mm == NULL iff we are lazy.
 */
#define flush_tlb_all()							\
	do {								\
		cpu_tlb_invalidate_all();				\
	} while (0)

/*
 * Flush all user virtual address space translations described by `_mm'.
 *
 * Currently, this is always called for current->mm, which should be
 * the same as current->active_mm.  It is currently not called for
 * the lazy TLB case.
 */
#define flush_tlb_mm(_mm)						\
	do {								\
		if ((_mm) == current->active_mm)			\
			cpu_tlb_invalidate_all();			\
	} while (0)

/*
 * Flush the specified range of user virtual address space translations.
 *
 * _mm need not be current->active_mm, but must not be NULL.
 */
#define flush_tlb_range(_mm,_start,_end)				\
	do {								\
		if ((_mm) == current->active_mm)			\
			cpu_tlb_invalidate_range((_start), (_end));	\
	} while (0)

/*
 * Flush the specified user virtual address space translation.
 */
#define flush_tlb_page(_vma,_page)					\
	do {								\
		if ((_vma)->vm_mm == current->active_mm)		\
			cpu_tlb_invalidate_page((_page),		\
				((_vma)->vm_flags & VM_EXEC));		\
	} while (0)
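
/*
 * Illustrative sketch (hypothetical, not compiled): the usual sequence
 * after modifying a pte is to install the new entry, drop any stale
 * TLB entry for that address, and then let update_mmu_cache() (declared
 * below) handle any deferred dcache cleaning.
 */
#if 0	/* example only */
static inline void example_change_pte(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *ptep,
				      pte_t entry)
{
	set_pte(ptep, entry);			/* install the new pte      */
	flush_tlb_page(vma, addr);		/* discard stale translation */
	update_mmu_cache(vma, addr, entry);	/* deferred dcache handling  */
}
#endif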

/*
 * if PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
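
/*
 * Illustrative sketch of the deferred-flush scheme (the real
 * implementation lives elsewhere in the ARM tree; the details below
 * are an assumption, not the actual code): if flush_dcache_page()
 * marked the page dirty instead of flushing it, the pending clean +
 * invalidate is performed here, before user space can see the page.
 */
#if 0	/* example only */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	struct page *page = pte_page(pte);

	if (page->mapping &&
	    test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
		unsigned long virt = (unsigned long)page_address(page);
		cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
	}
}
#endif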

/*
 * Old ARM MEMC stuff.  This supports the reversed mapping handling that
 * we have on the older 26-bit machines.  We don't have a MEMC chip, so...
 */
#define memc_update_all()		do { } while (0)
#define memc_update_mm(mm)		do { } while (0)
#define memc_update_addr(mm,pte,log)	do { } while (0)
#define memc_clear(mm,physaddr)		do { } while (0)