/*
 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
 * Copyright (C) 2005 Mips Technologies, Inc
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/security.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>

int vpelimit;

static int __init maxvpes(char *str)
{
	get_option(&str, &vpelimit);

	return 1;
}

__setup("maxvpes=", maxvpes);

int tclimit;

static int __init maxtcs(char *str)
{
	get_option(&str, &tclimit);

	return 1;
}

__setup("maxtcs=", maxtcs);
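
/*
 * Usage sketch (the values are illustrative, not taken from this file):
 * booting with "maxvpes=1 maxtcs=2" on the kernel command line leaves
 * vpelimit == 1 and tclimit == 2 for the MT bring-up code to consult.
 */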

/*
 * Dump new MIPS MT state for the core. Does not leave TCs halted.
 * Takes an argument which is taken to be a pre-call MVPControl value.
 */

void mips_mt_regdump(unsigned long mvpctl)
{
	unsigned long flags;
	unsigned long vpflags;
	unsigned long mvpconf0;
	int nvpe;
	int ntc;
	int i;
	int tc;
	unsigned long haltval;
	unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_soft_dump(void);
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);
	vpflags = dvpe();
	printk("=== MIPS MT State Dump ===\n");
	printk("-- Global State --\n");
	printk("   MVPControl Passed: %08lx\n", mvpctl);
	printk("   MVPControl Read: %08lx\n", vpflags);
	printk("   MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	printk("-- per-VPE State --\n");
	for (i = 0; i < nvpe; i++) {
		for (tc = 0; tc < ntc; tc++) {
			settc(tc);
			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
				printk("  VPE %d\n", i);
				printk("   VPEControl : %08lx\n",
				       read_vpe_c0_vpecontrol());
				printk("   VPEConf0 : %08lx\n",
				       read_vpe_c0_vpeconf0());
				printk("   VPE%d.Status : %08lx\n",
				       i, read_vpe_c0_status());
				printk("   VPE%d.EPC : %08lx %pS\n",
				       i, read_vpe_c0_epc(),
				       (void *) read_vpe_c0_epc());
				printk("   VPE%d.Cause : %08lx\n",
				       i, read_vpe_c0_cause());
				printk("   VPE%d.Config7 : %08lx\n",
				       i, read_vpe_c0_config7());
				break; /* Next VPE */
			}
		}
	}
	printk("-- per-TC State --\n");
	for (tc = 0; tc < ntc; tc++) {
		settc(tc);
		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
			/* Are we dumping ourself?  */
			haltval = 0; /* Then we're not halted, and mustn't be */
			tcstatval = flags; /* And pre-dump TCStatus is flags */
			printk("  TC %d (current TC with VPE EPC above)\n", tc);
		} else {
			haltval = read_tc_c0_tchalt();
			write_tc_c0_tchalt(1);
			tcstatval = read_tc_c0_tcstatus();
			printk("  TC %d\n", tc);
		}
		printk("   TCStatus : %08lx\n", tcstatval);
		printk("   TCBind : %08lx\n", read_tc_c0_tcbind());
		printk("   TCRestart : %08lx %pS\n",
		       read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
		printk("   TCHalt : %08lx\n", haltval);
		printk("   TCContext : %08lx\n", read_tc_c0_tccontext());
		if (!haltval)
			write_tc_c0_tchalt(0);
	}
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_soft_dump();
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("===========================\n");
	evpe(vpflags);
	local_irq_restore(flags);
}
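
/*
 * Example call (a sketch, not from this file): a caller would typically
 * disable multi-VPE execution and pass the saved pre-call MVPControl
 * value, e.g.
 *
 *	mips_mt_regdump(dvpe());
 */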

static int mt_opt_norps;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7;
static int mt_opt_config7 = -1;

static int __init rps_disable(char *s)
{
	mt_opt_norps = 1;
	return 1;
}
__setup("norps", rps_disable);

static int __init rpsctl_set(char *str)
{
	get_option(&str, &mt_opt_rpsctl);
	return 1;
}
__setup("rpsctl=", rpsctl_set);

static int __init nblsu_set(char *str)
{
	get_option(&str, &mt_opt_nblsu);
	return 1;
}
__setup("nblsu=", nblsu_set);

static int __init config7_set(char *str)
{
	get_option(&str, &mt_opt_config7);
	mt_opt_forceconfig7 = 1;
	return 1;
}
__setup("config7=", config7_set);

/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush;
int mt_protdflush;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;

static int __init set_protiflush(char *s)
{
	mt_protiflush = 1;
	return 1;
}
__setup("protiflush", set_protiflush);

static int __init set_protdflush(char *s)
{
	mt_protdflush = 1;
	return 1;
}
__setup("protdflush", set_protdflush);

static int __init niflush(char *s)
{
	get_option(&s, &mt_n_iflushes);
	return 1;
}
__setup("niflush=", niflush);

static int __init ndflush(char *s)
{
	get_option(&s, &mt_n_dflushes);
	return 1;
}
__setup("ndflush=", ndflush);

static unsigned int itc_base;

static int __init set_itc_base(char *str)
{
	get_option(&str, &itc_base);
	return 1;
}

__setup("itcbase=", set_itc_base);
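
/*
 * Example (the address is hypothetical): booting with "itcbase=0x1c000000"
 * asks mips_mt_set_cpuoptions() below to map the ITC cells starting at
 * that base address.
 */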

void mips_mt_set_cpuoptions(void)
{
	unsigned int oconfig7 = read_c0_config7();
	unsigned int nconfig7 = oconfig7;

	if (mt_opt_norps) {
		printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
	}
	if (mt_opt_rpsctl >= 0) {
		printk("34K return prediction stack override set to %d.\n",
			mt_opt_rpsctl);
		if (mt_opt_rpsctl)
			nconfig7 |= (1 << 2);
		else
			nconfig7 &= ~(1 << 2);
	}
	if (mt_opt_nblsu >= 0) {
		printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
		if (mt_opt_nblsu)
			nconfig7 |= (1 << 5);
		else
			nconfig7 &= ~(1 << 5);
	}
	if (mt_opt_forceconfig7) {
		printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
		nconfig7 = mt_opt_config7;
	}
	if (oconfig7 != nconfig7) {
		__asm__ __volatile("sync");
		write_c0_config7(nconfig7);
		ehb();
		printk("Config7: 0x%08x\n", read_c0_config7());
	}

	/* Report Cache management debug options */
	if (mt_protiflush)
		printk("I-cache flushes single-threaded\n");
	if (mt_protdflush)
		printk("D-cache flushes single-threaded\n");
	if (mt_n_iflushes != 1)
		printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
	if (mt_n_dflushes != 1)
		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);

	if (itc_base != 0) {
		/*
		 * Configure ITC mapping.  This code is very
		 * specific to the 34K core family, which uses
		 * a special mode bit ("ITC") in the ErrCtl
		 * register to enable access to ITC control
		 * registers via cache "tag" operations.
		 */
		unsigned long ectlval;
		unsigned long itcblkgrn;

		/* ErrCtl register is known as "ecc" to Linux */
		ectlval = read_c0_ecc();
		write_c0_ecc(ectlval | (0x1 << 26));
		ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
		/* Read "cache tag" for Dcache pseudo-index 8 */
		cache_op(Index_Load_Tag_D, INDEX_8);
		ehb();
		itcblkgrn = read_c0_dtaglo();
		itcblkgrn &= 0xfffe0000;
		/* Set for 128 byte pitch of ITC cells */
		itcblkgrn |= 0x00000c00;
		/* Stage in Tag register */
		write_c0_dtaglo(itcblkgrn);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_8);
		/* Now set base address, and turn ITC on with 0x1 bit */
		write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_0);
		write_c0_ecc(ectlval);
		ehb();
		printk("Mapped %ld ITC cells starting at 0x%08x\n",
			((itcblkgrn & 0x7fe00000) >> 20), itc_base);
	}
}

/*
 * How cache flushes are protected from concurrent execution depends on
 * the MP software model chosen.
 */

void mt_cflush_lockdown(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_lockdown(void);

	smtc_cflush_lockdown();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}

void mt_cflush_release(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_release(void);

	smtc_cflush_release();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
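
/*
 * Sketch of the intended caller pairing (illustrative only, since the
 * non-SMTC variants above are still to be filled in):
 *
 *	mt_cflush_lockdown();
 *	... perform the protected cache flush ...
 *	mt_cflush_release();
 */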

struct class *mt_class;

static int __init mt_init(void)
{
	struct class *mtc;

	mtc = class_create(THIS_MODULE, "mt");
	if (IS_ERR(mtc))
		return PTR_ERR(mtc);

	mt_class = mtc;

	return 0;
}

subsys_initcall(mt_init);
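
/*
 * Example (major/minor are hypothetical here) of how MT driver code could
 * hang a device off the "mt" class once mt_init() has run:
 *
 *	device_create(mt_class, NULL, MKDEV(major, minor), NULL, "vpe%d", 0);
 */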