/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

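/*
 * Per-core Dcache CacheErr state, expected to be saved here by the
 * low-level cache error handler (except_vec2_octeon, installed in
 * octeon_cache_init() below) and reported and cleared by
 * cache_parity_error_octeon().
 */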
unsigned long long cache_err_dcache[NR_CPUS];

/**
 * Octeon automatically flushes the dcache on tlb changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache.  No flushing is needed.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
        /* Nothing to do */
}

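/*
 * On Octeon a single synci is assumed to invalidate the entire local
 * icache; local_octeon_flush_icache_range() below relies on this and
 * ignores the requested range.
 */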
static inline void octeon_local_flush_icache(void)
{
        asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range.
 */
static void local_octeon_flush_icache_range(unsigned long start,
                                            unsigned long end)
{
        octeon_local_flush_icache();
}

/**
 * Flush caches as necessary for all cores affected by a
 * vma.  If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
        extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
        int cpu;
        cpumask_t mask;
#endif

        mb();
        octeon_local_flush_icache();
#ifdef CONFIG_SMP
        preempt_disable();
        cpu = smp_processor_id();

        /*
         * If we have a vma structure, we only need to worry about
         * cores it has been used on
         */
        if (vma)
                mask = *mm_cpumask(vma->vm_mm);
        else
                mask = cpu_online_map;
        cpu_clear(cpu, mask);
        for_each_cpu_mask(cpu, mask)
                octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

        preempt_enable();
#endif
}


/**
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
        octeon_flush_icache_all_cores(NULL);
}


/**
 * Called to flush all memory associated with a memory
 * context.
 *
 * @mm:     Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
        /*
         * According to the R4K version of this file, CPUs without
         * dcache aliases don't need to do anything here
         */
}


/**
 * Flush a range of kernel addresses out of the icache
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
        octeon_flush_icache_all_cores(NULL);
}


/**
 * Flush the icache for a trampoline.  These are used for interrupt
 * and exception hooking.
 *
 * @addr:   Address to flush
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, addr);
        octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a range out of a vma
 *
 * @vma:    VMA to flush
 * @start:  Start of the range to flush
 * @end:    End of the range to flush
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
{
        if (vma->vm_flags & VM_EXEC)
                octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a specific page of a vma
 *
 * @vma:    VMA to flush page for
 * @page:   Page to flush
 * @pfn:    Page frame number of the page
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long page, unsigned long pfn)
{
        if (vma->vm_flags & VM_EXEC)
                octeon_flush_icache_all_cores(vma);
}


/**
 * Probe Octeon's caches
 */
static void __cpuinit probe_octeon(void)
{
        unsigned long icache_size;
        unsigned long dcache_size;
        unsigned int config1;
        struct cpuinfo_mips *c = &current_cpu_data;

        config1 = read_c0_config1();
        switch (c->cputype) {
        case CPU_CAVIUM_OCTEON:
        case CPU_CAVIUM_OCTEON_PLUS:
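                /*
                 * Decode the icache geometry from the standard MIPS
                 * Config1 fields: IL (bits 21:19) gives the line size,
                 * IS (bits 24:22) the sets per way, and IA (bits 18:16)
                 * the associativity minus one.
                 */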
                c->icache.linesz = 2 << ((config1 >> 19) & 7);
                c->icache.sets = 64 << ((config1 >> 22) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size =
                        c->icache.sets * c->icache.ways * c->icache.linesz;
                c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
                c->dcache.linesz = 128;
                if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
                        c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
                else
                        c->dcache.sets = 1; /* CN3XXX has one Dcache set */
                c->dcache.ways = 64;
                dcache_size =
                        c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_CAVIUM_OCTEON2:
                c->icache.linesz = 2 << ((config1 >> 19) & 7);
                c->icache.sets = 8;
                c->icache.ways = 37;
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

                c->dcache.linesz = 128;
                c->dcache.ways = 32;
                c->dcache.sets = 8;
                dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        default:
                panic("Unsupported Cavium Networks CPU type\n");
                break;
        }

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
        c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

        if (smp_processor_id() == 0) {
                pr_notice("Primary instruction cache %ldkB, %s, %d way, "
                          "%d sets, linesize %d bytes.\n",
                          icache_size >> 10,
                          cpu_has_vtag_icache ?
                          "virtually tagged" : "physically tagged",
                          c->icache.ways, c->icache.sets, c->icache.linesz);

                pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
                          "linesize %d bytes.\n",
                          dcache_size >> 10, c->dcache.ways,
                          c->dcache.sets, c->dcache.linesz);
        }
}


/**
 * Set up the Octeon cache flush routines
 */
void __cpuinit octeon_cache_init(void)
{
        extern unsigned long ebase;
        extern char except_vec2_octeon;

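        /*
         * Install the Octeon-specific cache error handler at the
         * CacheError vector (ebase + 0x100) and make sure the new
         * handler is visible to instruction fetch.
         */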
        memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
        octeon_flush_cache_sigtramp(ebase + 0x100);

        probe_octeon();

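        /*
         * With no dcache aliases to worry about, shared mappings only
         * need to be page aligned.
         */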
        shm_align_mask = PAGE_SIZE - 1;

        flush_cache_all = octeon_flush_icache_all;
        __flush_cache_all = octeon_flush_icache_all;
        flush_cache_mm = octeon_flush_cache_mm;
        flush_cache_page = octeon_flush_cache_page;
        flush_cache_range = octeon_flush_cache_range;
        flush_cache_sigtramp = octeon_flush_cache_sigtramp;
        flush_icache_all = octeon_flush_icache_all;
        flush_data_cache_page = octeon_flush_data_cache_page;
        flush_icache_range = octeon_flush_icache_range;
        local_flush_icache_range = local_octeon_flush_icache_range;

        build_clear_page();
        build_copy_page();
}

/**
 * Handle a cache error exception
 */
static void cache_parity_error_octeon(int non_recoverable)
{
        unsigned long coreid = cvmx_get_core_num();
        uint64_t icache_err = read_octeon_c0_icacheerr();

        pr_err("Cache error exception:\n");
        pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
        if (icache_err & 1) {
                pr_err("CacheErr (Icache) == %llx\n",
                       (unsigned long long)icache_err);
                write_octeon_c0_icacheerr(0);
        }
        if (cache_err_dcache[coreid] & 1) {
                pr_err("CacheErr (Dcache) == %llx\n",
                       (unsigned long long)cache_err_dcache[coreid]);
                cache_err_dcache[coreid] = 0;
        }

        if (non_recoverable)
                panic("Can't handle cache error: nested exception");
}

/**
 * Called when the exception is recoverable
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
        cache_parity_error_octeon(0);
}

/**
 * Called when the exception is not recoverable
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
        cache_parity_error_octeon(1);
}