// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

#ifdef CONFIG_ISA_ARCV2
#define USE_RGN_FLSH	1
#endif

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int op, const int full_page);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->line_len)						\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->line_len)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));

	return buf;
}

/*
 * Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation done here, simply read/convert the BCRs
 */
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;


	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c) {
		ioc_exists = 1;

		/*
		 * As for today we don't support both IOC and ZONE_HIGHMEM enabled
		 * simultaneously. This happens because as of today IOC aperture covers
		 * only ZONE_NORMAL (low mem) and any dma transactions outside this
		 * region won't be HW coherent.
		 * If we want to use both IOC and ZONE_HIGHMEM we can use
		 * bounce_buffer to handle dma transactions to HIGHMEM.
		 * Also it is possible to modify dma_direct cache ops or increase IOC
		 * aperture size if we are planning to use HIGHMEM without PAE.
		 */
		if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
			ioc_enable = 0;
	} else {
		ioc_enable = 0;
	}

	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}

void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * Cache Flush programming model
 *
 * ARC700 MMUv3 I$ and D$ are both VIPT and can potentially alias.
 * Programming model requires both paddr and vaddr irrespective of aliasing
 * considerations:
 *  - vaddr in {I,D}C_IV?L
 *  - paddr in {I,D}C_PTAG
 *
 * In HS38x (MMUv4), D$ is PIPT, I$ is VIPT and can still alias.
 * Programming model is different for aliasing vs. non-aliasing I$
 *  - D$ / Non-aliasing I$: only paddr in {I,D}C_IV?L
 *  - Aliasing I$: same as ARC700 above (so MMUv3 routine used for MMUv4 I$)
 *
 *  - If PAE40 is enabled, independent of aliasing considerations, the higher
 *    bits need to be written into PTAG_HI
 */
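
/*
 * Summarizing the above, as implemented by the loop helpers below:
 *  - v3 loop: per line, write paddr to {I,D}C_PTAG, then vaddr to {I,D}C_IV?L
 *  - v4 loop: per line, write paddr alone to the IV?L/IVDL/FLDL command reg
 *  - PAE40:   write the upper address bits to PTAG_HI once, before the loop
 */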

static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
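	/*
	 * E.g. (illustrative numbers): with 64 byte lines, @paddr = 0x...8024
	 * and @sz = 0x90: sz becomes 0x90 + 0x24 = 0xb4, paddr/vaddr are
	 * floored to 0x...8000, and DIV_ROUND_UP(0xb4, 64) = 3 lines are
	 * operated on.
	 */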
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

#ifndef USE_RGN_FLSH

/*
 * Per-line loop for the MMUv4 programming model: only @paddr is needed
 * (D$ is PIPT; a non-aliasing I$ is likewise handled with paddr alone)
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (op == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#else

/*
 * optimized flush operation which takes a region as opposed to iterating per line
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int s, e;

	/* Only for Non aliasing I-cache in HS38 */
	if (op == OP_INV_IC) {
		s = ARC_REG_IC_IVIR;
		e = ARC_REG_IC_ENDR;
	} else {
		s = ARC_REG_DC_STARTR;
		e = ARC_REG_DC_ENDR;
	}

	if (!full_page) {
		/* for any leading gap between @paddr and start of cache line */
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;

		/*
		 *  account for any trailing gap to end of cache line
		 *  this is equivalent to DIV_ROUND_UP() in line ops above
		 */
		sz += L1_CACHE_BYTES - 1;
	}

	if (is_pae40_enabled()) {
		/* TBD: check if crossing 4TB boundary */
		if (op == OP_INV_IC)
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	/* ENDR needs to be set ahead of START */
	write_aux_reg(e, paddr + sz);	/* ENDR is exclusive */
	write_aux_reg(s, paddr);

	/* caller waits on DC_CTRL.FS */
}

#endif

#ifdef CONFIG_ARC_MMU_V3
#define __cache_line_loop	__cache_line_loop_v3
#else
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

#ifndef USE_RGN_FLSH
/*
 * this version avoids extra read/write of DC_CTRL for flush or invalidate ops
 * in the non region flush regime (such as for ARCompact)
 */
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

#else

static inline void __before_dc_op(const int op)
{
	const unsigned int ctl = ARC_REG_DC_CTRL;
	unsigned int val = read_aux_reg(ctl);

	if (op == OP_FLUSH_N_INV) {
		val |= DC_CTRL_INV_MODE_FLUSH;
	}

	if (op != OP_INV_IC) {
		/*
		 * Flush / Invalidate is provided by DC_CTRL.RGN_OP 0 or 1
		 * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
		 */
		val &= ~DC_CTRL_RGN_OP_MSK;
		if (op & OP_INV)
			val |= DC_CTRL_RGN_OP_INV;
	}
	write_aux_reg(ctl, val);
}

#endif


static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

static inline void __dc_disable(void)
{
	const int r = ARC_REG_DC_CTRL;

	__dc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
}

static void __dc_enable(void)
{
	const int r = ARC_REG_DC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op, full_page);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_disable()
#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;
	phys_addr_t end;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
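	/*
	 * Thus, per the bit twiddling below:
	 *  OP_INV         -> IM = 0, RGN_OP = Invalidate (discard only)
	 *  OP_FLUSH       -> IM = 1, RGN_OP = Flush (IM is a don't-care here)
	 *  OP_FLUSH_N_INV -> IM = 1, RGN_OP = Invalidate (wback + discard)
	 */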
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
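	/*
	 * E.g. (illustrative numbers): l2_line_sz = 64, @paddr = 0x8000_0040,
	 * @sz = 0x40  =>  END = 0x8000_0040 + 0x40 + 63 = 0x8000_00bf while
	 * START = 0x8000_0040, so END always lands in a line past START's.
	 */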
	end = paddr + sz + l2_line_sz - 1;
	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));

	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));

	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);

	const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
	unsigned int ctrl, cmd;
	unsigned long flags;
	int num_lines;

	spin_lock_irqsave(&lock, flags);

	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;

	sz += paddr & ~SLC_LINE_MASK;
	paddr &= SLC_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l2_line_sz);

	while (num_lines-- > 0) {
		write_aux_reg(cmd, paddr);
		paddr += l2_line_sz;
	}

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

#define slc_op(paddr, sz, op)	slc_op_rgn(paddr, sz, op)

noinline static void slc_entire_op(const int op)
{
	unsigned int ctrl, r = ARC_REG_SLC_CTRL;

	ctrl = read_aux_reg(r);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(r, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(r);

	/* Important to wait for flush to complete */
	while (read_aux_reg(r) & SLC_CTRL_BUSY);
}

static inline void arc_slc_disable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	slc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
}

static inline void arc_slc_enable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
}

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, the former needs flushing.
 */
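
/*
 * "Congruent" here means the two virtual addresses select the same cache
 * colour. E.g. (illustrative geometry): a 32K, 4-way VIPT D$ with 4K pages
 * has 32K/4/4K = 2 colours, so two mappings of a page are congruent iff they
 * agree in the single colour bit just above the page offset.
 */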
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping_file(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {

		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address, thus we
 * need to convert vmalloc addr to PHY addr
 */
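
/*
 * Typical (illustrative) caller pattern, e.g. a code patcher:
 *
 *	memcpy(insn_addr, new_insn, len);
 *	flush_icache_range((unsigned long)insn_addr,
 *			   (unsigned long)insn_addr + len);
 */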
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * General purpose helper to make I and D cache lines consistent.
 *  @paddr is phy addr of region
 *  @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 *    use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);

}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	phys_addr_t paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
	__flush_dcache_page((phys_addr_t)page_address(page),
			    (phys_addr_t)page_address(page));

}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}
EXPORT_SYMBOL(clear_user_page);

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
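
/*
 * A hypothetical userspace caller (a JIT that emitted code into @buf) would
 * invoke this roughly as below; the syscall-number macro and any flag values
 * from <asm/cachectl.h> are assumptions, and the current implementation
 * below ignores the arguments and flushes everything anyway:
 *
 *	syscall(__NR_cacheflush, (unsigned long)buf, len, 0);
 */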
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}

/*
 * IO-Coherency (IOC) setup rules:
 *
 * 1. Needs to be at system level, so only once by Master core
 *    Non-Masters need not be accessing caches at that time
 *    - They are either HALT_ON_RESET and kick started much later or
 *    - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
 *      doesn't perturb caches or coherency unit
 *
 * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
 *    otherwise any straggler data might behave strangely post IOC enabling
 *
 * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
 *    Coherency transactions
 */
noinline void __init arc_ioc_setup(void)
{
	unsigned int ioc_base, mem_sz;

	/*
	 * If IOC was already enabled (due to bootloader) it technically needs to
	 * be reconfigured with aperture base/size corresponding to Linux memory map
	 * which will certainly be different from uboot's. But disabling and
	 * reenabling IOC when DMA might be potentially active is tricky business.
	 * To avoid random memory issues later, just panic here and ask user to
	 * upgrade bootloader to one which doesn't enable IOC
	 */
	if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
		panic("IOC already enabled, please upgrade bootloader!\n");

	if (!ioc_enable)
		return;

	/* Flush + invalidate + disable L1 dcache */
	__dc_disable();

	/* Flush + invalidate SLC */
	if (read_aux_reg(ARC_REG_SLC_BCR))
		slc_entire_op(OP_FLUSH_N_INV);

	/*
	 * currently IOC Aperture covers entire DDR
	 * TBD: fix for PGU + 1GB of low mem
	 * TBD: fix for PAE
	 */
	mem_sz = arc_get_mem_sz();

	if (!is_power_of_2(mem_sz) || mem_sz < 4096)
		panic("IOC Aperture size must be power of 2 larger than 4KB");

	/*
	 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512MB, 0x12 implies 1GB...
	 */
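	/*
	 * E.g. for mem_sz = 1 GB: order_base_2(1 GB >> 10) = order_base_2(1M)
	 * = 20, so the register is programmed with 20 - 2 = 18 = 0x12,
	 * matching the "0x12 implies 1GB" encoding above.
	 */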
	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);

	/* for now assume kernel base is start of IOC aperture */
	ioc_base = CONFIG_LINUX_RAM_BASE;

	if (ioc_base % mem_sz != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
	write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
	write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);

	/* Re-enable L1 dcache */
	__dc_enable();
}

/*
 * Cache related boot time checks/setups only needed on master CPU:
 *  - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
 *    Assume SMP only, so all cores will have same cache config. A check on
 *    one core suffices for all
 *  - IOC setup / dma callbacks only need to be done once
 */
void __init arc_cache_init_master(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

			if (dc->alias) {
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}

	/*
	 * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
	 * than or equal to any cache line length.
	 */
	BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
			 "SMP_CACHE_BYTES must be >= any cache line length");
	if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
		panic("L2 Cache line [%d] > kernel Config [%d]\n",
		      l2_line_sz, SMP_CACHE_BYTES);

	/* Note that SLC disable not formally supported till HS 3.0 */
	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
		arc_slc_disable();

	if (is_isa_arcv2() && ioc_exists)
		arc_ioc_setup();

	if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
	/*
	 * In case of IOC (say IOC+SLC case), pointers above could still be set
	 * but end up not being relevant as the first function in chain is not
	 * called at all for devices using coherent DMA.
	 *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
	 */
}

void __ref arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (!cpu)
		arc_cache_init_master();

	/*
	 * In PAE regime, TLB and cache maintenance ops take wider addresses
	 * And even if PAE is not enabled in kernel, the upper 32-bits still need
	 * to be zeroed to keep the ops sane.
	 * As an optimization for the more common !PAE case, zero them out
	 * once at init, rather than checking/setting to 0 for every runtime op
	 */
	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {

		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);

		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);

		if (l2_line_sz) {
			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
		}
	}
}