// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irq.h>

#include <asm/bcache.h>
#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

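/* Tracks which cores are currently powered up */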
static DECLARE_BITMAP(core_power, NR_CPUS);

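/* Per-core boot configuration, allocated in cps_prepare_cpus() */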
struct core_boot_config *mips_cps_core_bootcfg;

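/* Number of VPEs to bring up in a core, capped by smp_max_threads */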
static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
{
	return min(smp_max_threads, mips_cps_numvps(cluster, core));
}

static void __init cps_smp_setup(void)
{
	unsigned int nclusters, ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int cl, c, v;

	/* Detect & record VPE topology */
	nvpes = 0;
	nclusters = mips_cps_numclusters();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
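	/* Builds output like "VPE topology {2,2},{2,2} total 8" */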
	for (cl = 0; cl < nclusters; cl++) {
		if (cl > 0)
			pr_cont(",");
		pr_cont("{");

		ncores = mips_cps_numcores(cl);
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);

			if (c > 0)
				pr_cont(",");
			pr_cont("%u", core_vpes);

			/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
			if (!cl && !c)
				smp_num_siblings = core_vpes;

			for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
				cpu_set_cluster(&cpu_data[nvpes + v], cl);
				cpu_set_core(&cpu_data[nvpes + v], c);
				cpu_set_vpe_id(&cpu_data[nvpes + v], v);
			}

			nvpes += core_vpes;
		}

		pr_cont("}");
	}
	pr_cont(" total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
		set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable, cores_limited;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	cores_limited = false;
	if (cca_unsuitable || cpu_has_dc_aliases) {
		for_each_present_cpu(c) {
			if (cpus_are_siblings(smp_processor_id(), c))
				continue;

			set_cpu_present(c, false);
			cores_limited = true;
		}
	}
	if (cores_limited)
		pr_warn("Using only one core due to %s%s%s\n",
			cca_unsuitable ? "unsuitable CCA" : "",
			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
			cpu_has_dc_aliases ? "dcache aliasing" : "");

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 * s1 = CM GCR base address
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
	UASM_i_LA(&entry_code, 17, (long)mips_gcr_base);
	BUG_ON((void *)entry_code > (void *)&mips_cps_core_entry_patch_end);
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	ncores = mips_cps_numcores(0);
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(0, c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	set_gcr_access(1 << core);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

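		/* Poll for up to ~1s (100 * 10ms) before we start warning */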
		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
			seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

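/* Runs on a CPU within the target core to bring up a sibling VPE */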
static void remote_vpe_boot(void *dummy)
{
	unsigned core = cpu_core(&current_cpu_data);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	/* We don't yet support booting CPUs in other clusters */
	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
		return -ENOSYS;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

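	/* Mark this VPE in the core's mask of VPEs to be started */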
	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (!cpus_are_siblings(cpu, smp_processor_id())) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (!cpus_are_siblings(cpu, remote))
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
	return 0;
}

static void cps_init_secondary(void)
{
	int core = cpu_core(&current_cpu_data);

	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned int ident = read_gic_vl_ident();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (core > 0 && !read_gcr_cl_coherence())
		pr_warn("Core %u is not in coherent domain\n", core);

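	/* Unmask the hardware interrupt lines; with an EIC leave the IM bits clear */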
	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
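	/* Arm the count/compare timer roughly 8 jiffies into the future */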
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)

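/* How a dying CPU is taken down: halt its TC/VP, or gate power to its core */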
enum cpu_death {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
};

static void cps_shutdown_this_cpu(enum cpu_death death)
{
	unsigned int cpu, core, vpe_id;

	cpu = smp_processor_id();
	core = cpu_core(&cpu_data[cpu]);

	if (death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
			pr_debug("Gating power to core %d\n", core);
			/* Power down the core */
			cps_pm_enter_state(CPS_PM_POWER_GATED);
		}
	}
}

#ifdef CONFIG_KEXEC

static void cps_kexec_nonboot_cpu(void)
{
	if (cpu_has_mipsmt || cpu_has_vp)
		cps_shutdown_this_cpu(CPU_DEATH_HALT);
	else
		cps_shutdown_this_cpu(CPU_DEATH_POWER);
}

#endif /* CONFIG_KEXEC */

#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */

#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	irq_migrate_all_off_this_cpu();

	return 0;
}

static unsigned cpu_death_sibling;
static enum cpu_death cpu_death;

void play_dead(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (!cpus_are_siblings(cpu, cpu_death_sibling))
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	cpuhp_ap_report_dead();

	cps_shutdown_this_cpu(cpu_death);

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

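/* Runs on a sibling CPU: spin until the dying CPU's TC reports halted */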
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

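/* Nothing to do here; the wait for the CPU to die happens in cps_cleanup_dead_cpu() */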
static void cps_cpu_die(unsigned int cpu) { }

static void cps_cleanup_dead_cpu(unsigned cpu)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	ktime_t fail_time;
	unsigned stat;
	int err;

	/*
	 * Now wait for the CPU to actually offline. Without doing this that
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		fail_time = ktime_add_ms(ktime_get(), 2000);
		do {
			mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE;
			stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
			mips_cpc_unlock_other();
			mips_cm_unlock_other();

			if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
				break;

			/*
			 * The core ought to have powered down, but didn't &
			 * now we don't really know what state it's in. It's
			 * likely that its _pwr_up pin has been wired to logic
			 * 1 & it powered back up as soon as we powered it
			 * down...
			 *
			 * The best we can do is warn the user & continue in
			 * the hope that the core is doing nothing harmful &
			 * might behave properly if we online it later.
			 */
			if (WARN(ktime_after(ktime_get(), fail_time),
				 "CPU%u hasn't powered down, seq. state %u\n",
				 cpu, stat))
				break;
		} while (1);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPUs registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static const struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
	.cleanup_dead_cpu	= cps_cleanup_dead_cpu,
#endif
#ifdef CONFIG_KEXEC
	.kexec_nonboot_cpu	= cps_kexec_nonboot_cpu,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern const struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}