/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <asm/page.h>
#include <asm/cacheflush.h>
#include <arch/icache.h>
#include <arch/spr_def.h>

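/*
 * Invalidate the instruction cache for the given VA range, so that
 * subsequent instruction fetches see newly written code (e.g. after
 * loading a module).
 */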
void __flush_icache_range(unsigned long start, unsigned long end)
{
	invalidate_icache((const void *)start, end - start, PAGE_SIZE);
}

/* Force a load instruction to issue. */
static inline void force_load(char *p)
{
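	/*
	 * The cast through a volatile pointer keeps the compiler from
	 * discarding this otherwise-unused load.
	 */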
	*(volatile char *)p;
}

/*
 * Flush and invalidate a VA range that is homed remotely on a single
 * core (if "!hfh") or homed via hash-for-home (if "hfh"), waiting
 * until the memory controller holds the flushed values.
 */
void finv_buffer_remote(void *buffer, size_t size, int hfh)
{
	char *p, *base;
	size_t step_size, load_count;

	/*
	 * On TILEPro the striping granularity is a fixed 8KB; on
	 * TILE-Gx it is configurable, and we rely on the fact that
	 * the hypervisor always configures maximum striping, so that
	 * bits 9 and 10 of the PA are part of the stripe function and
	 * every 512 bytes we hit a striping boundary.
	 */
#ifdef __tilegx__
	const unsigned long STRIPE_WIDTH = 512;
#else
	const unsigned long STRIPE_WIDTH = 8192;
#endif

#ifdef __tilegx__
	/*
	 * On TILE-Gx, we must disable the dstream prefetcher before doing
	 * a cache flush; otherwise, we could end up with data in the cache
	 * that we don't want there.  Note that normally we'd do an mf
	 * after the SPR write that disables the prefetcher, but we do one
	 * below, before any further loads, so there's no need to do it
	 * here.
	 */
	uint_reg_t old_dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
	__insn_mtspr(SPR_DSTREAM_PF, 0);
#endif

	/*
	 * Flush and invalidate the buffer out of the local L1/L2
	 * and request the home cache to flush and invalidate as well.
	 */
	__finv_buffer(buffer, size);

	/*
	 * Wait for the home cache to acknowledge that it has processed
	 * all the flush-and-invalidate requests.  This does not mean
	 * that the flushed data has reached the memory controller yet,
	 * but it does mean the home cache is processing the flushes.
	 */
	__insn_mf();

	/*
	 * Issue a load to the last cache line, which can't complete
	 * until all the previously-issued flushes to the same memory
	 * controller have also completed.  If we weren't striping
	 * memory, that one load would be sufficient, but since we may
	 * be, we also need to back up to the last load issued to
	 * another memory controller, which would be the point where
	 * we crossed a "striping" boundary (the granularity of striping
	 * across memory controllers).  Keep backing up and doing this
	 * until we are before the beginning of the buffer, or have
	 * hit all the controllers.
	 *
	 * If we are flushing a hash-for-home buffer, it's even worse.
	 * Each line may be homed on a different tile, and each tile
	 * may have up to four lines that are on different
	 * controllers.  So as we walk backwards, we have to touch
	 * enough cache lines to satisfy these constraints.  In
	 * practice this ends up being close enough to "load from
	 * every cache line on a full memory stripe on each
	 * controller" that we simply do that, to simplify the logic.
	 *
	 * On TILE-Gx the hash-for-home function is much more complex,
	 * with the upshot being we can't readily guarantee we have
	 * hit both entries in the 128-entry AMT that were hit by any
	 * load in the entire range, so we just re-load them all.
	 * With larger buffers, we may want to consider using a hypervisor
	 * trap to issue loads directly to each hash-for-home tile for
	 * each controller (doing it from Linux would trash the TLB).
	 */
	if (hfh) {
		step_size = L2_CACHE_BYTES;
#ifdef __tilegx__
		load_count = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES;
#else
		load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) *
			     (1 << CHIP_LOG_NUM_MSHIMS());
#endif
	} else {
		step_size = STRIPE_WIDTH;
		load_count = (1 << CHIP_LOG_NUM_MSHIMS());
	}
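	/*
	 * Note that step_size * load_count is the span the walk below
	 * covers: one load per stripe on each controller in the non-hfh
	 * case, every cache line of a full stripe per controller for
	 * TILEPro hash-for-home, and every line of the buffer for
	 * TILE-Gx hash-for-home.
	 */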

	/* Load the last byte of the buffer. */
	p = (char *)buffer + size - 1;
	force_load(p);

	/* Bump down to the end of the previous stripe or cache line. */
	p -= step_size;
	p = (char *)((unsigned long)p | (step_size - 1));

	/* Figure out how far back we need to go. */
	base = p - (step_size * (load_count - 2));
	if ((unsigned long)base < (unsigned long)buffer)
		base = buffer;
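	/*
	 * The loop below then issues load_count - 1 more loads, one every
	 * step_size bytes from p down to base, so together with the load
	 * of the last byte we issue load_count loads in all, unless the
	 * walk was clamped at the start of the buffer.
	 */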

	/*
	 * Fire all the loads we need.  The MAF only has eight entries
	 * so we can have at most eight outstanding loads, so we
	 * unroll by that amount.
	 */
#pragma unroll 8
	for (; p >= base; p -= step_size)
		force_load(p);

	/*
	 * Repeat, but with inv's instead of loads, to get rid of the
	 * data we just loaded into our own cache and the old home L3.
	 * No need to unroll since inv's don't target a register.
	 */
	p = (char *)buffer + size - 1;
	__insn_inv(p);
	p -= step_size;
	p = (char *)((unsigned long)p | (step_size - 1));
	for (; p >= base; p -= step_size)
		__insn_inv(p);
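	/*
	 * The lines invalidated above are clean copies (the loads only
	 * read them), so dropping them without a writeback loses nothing.
	 */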

	/* Wait for the load+inv's (and thus finvs) to have completed. */
	__insn_mf();

#ifdef __tilegx__
	/* Reenable the prefetcher. */
	__insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
#endif
}