#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void); /* flushes data-cache only (all processors) */
void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
#else
#define flush_data_cache() flush_data_cache_local(NULL)
#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif
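
/* The void * argument exists so the _local variants can be handed
 * straight to on_each_cpu() as SMP callbacks; it is otherwise unused,
 * which is why the UP wrappers above just pass NULL. */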

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
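/* flush_cache_dup_mm() is called by the core VM when dup_mmap() copies
 * an address space at fork time; PA-RISC has no cheaper option than a
 * full flush_cache_mm(). */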

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}
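
/* Typical use (a hypothetical sketch): after the kernel writes to a
 * user-visible page through its kernel mapping, push the dirty lines
 * out so a user-space alias of the page sees the new data:
 *
 *	void *kva = kmap(page);
 *	memcpy(kva, data, len);
 *	kunmap(page);
 *	flush_kernel_dcache_page(page);
 */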

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
/* vmap range flushes and invalidates.  Architecturally, we don't need
 * the invalidate, because the CPU should refuse to speculate once an
 * area has been flushed; the "invalidate" below is therefore just
 * another write-back flush, plus a pass over pages dirtied through
 * their regular kernel mapping. */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	flush_kernel_dcache_range_asm(start, start + size);
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	void *cursor = vaddr;

	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(cursor);

		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
			flush_kernel_dcache_page(page);
	}
	flush_kernel_dcache_range_asm(start, start + size);
}
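
/* These helpers bracket I/O done through a vmalloc()/vmap() alias.
 * A hypothetical sketch, with do_pio() standing in for a driver's
 * transfer routine:
 *
 *	flush_kernel_vmap_range(vbuf, len);
 *	do_pio(dev, vbuf, len);
 *	invalidate_kernel_vmap_range(vbuf, len);
 *
 * The flush makes CPU stores to vbuf visible to the device before the
 * transfer; the invalidate makes the device's writes visible to the
 * CPU afterwards. */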

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)
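
/* flush_dcache_page() holds this lock while walking the user mappings
 * of a page-cache page, so the set of aliases cannot change under it;
 * PA-RISC simply reuses the mapping's tree_lock for the purpose. */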

#define flush_icache_page(vma,page)	do {		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page));	\
} while (0)
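
/* The PA-RISC I-cache does not snoop the D-cache, so freshly written
 * code must first be flushed out of the D-cache to memory and then
 * invalidated from the I-cache before it can safely be executed;
 * hence both steps in the macros above and below. */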

#define flush_icache_range(s,e)		do {		\
	flush_kernel_dcache_range_asm(s,e);		\
	flush_kernel_icache_range_asm(s,e);		\
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)
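
/* copy_to_user_page() is used by e.g. access_process_vm() (ptrace):
 * flush the user alias so stale dirty lines cannot overwrite the copy,
 * memcpy() through the kernel mapping, then flush the kernel alias so
 * the user mapping observes the new data. */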

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)
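
/* The read direction only needs the user alias flushed up front so its
 * dirty lines become visible through the kernel mapping; the plain
 * kernel buffer filled by memcpy() needs no flush afterwards. */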

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
/* Defined in pacache.S, exported in cache.c, used by flush_anon_page(). */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		flush_dcache_page_asm(page_to_phys(page), vmaddr);
}
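
/* Anonymous pages have no address_space, so flush_dcache_page() cannot
 * find their user mappings; the caller instead supplies the single
 * user address (vmaddr) and we flush that alias directly. */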

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif
#ifdef CONFIG_PA8X00
/* Only the PA8800 and PA8900 need this */

#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP

void kunmap_parisc(void *addr);
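
/* kunmap_parisc() (implemented in cache.c) flushes the kernel alias of
 * the page being unmapped, so dirty lines left by the kernel mapping
 * cannot conflict with a user alias of the same page; the PA8800/
 * PA8900 caches do not tolerate such inequivalent aliases.  kmap()
 * itself is trivial since PA-RISC has no highmem. */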

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page)			kunmap_parisc(page_address(page))

static inline void *__kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}

static inline void __kunmap_atomic(void *addr)
{
	kunmap_parisc(addr);
	pagefault_enable();
}

#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
#endif

#endif /* _PARISC_CACHEFLUSH_H */