// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "smp.h"

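/*
 * Per-CPU IPI/VIRQ bindings. An .irq value of -1 marks a slot that is
 * not currently bound; xen_smp_intr_free() relies on this sentinel.
 */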
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback: account the IPI and poke the scheduler.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

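/*
 * Unbind this CPU's IPI/VIRQ handlers and free their names. Unbound
 * slots are skipped via the irq == -1 sentinel, so this is safe to
 * call on a partially initialized CPU and doubles as the error path
 * for xen_smp_intr_init().
 */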
void xen_smp_intr_free(unsigned int cpu)
{
	kfree(per_cpu(xen_resched_irq, cpu).name);
	per_cpu(xen_resched_irq, cpu).name = NULL;
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_callfunc_irq, cpu).name);
	per_cpu(xen_callfunc_irq, cpu).name = NULL;
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_debug_irq, cpu).name);
	per_cpu(xen_debug_irq, cpu).name = NULL;
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
	per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
	}
}

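/*
 * Bind the per-CPU IPI handlers (and, without FIFO event channels,
 * the debug VIRQ) for @cpu. On any failure, everything bound so far
 * is torn down again via xen_smp_intr_free().
 */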
int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	per_cpu(xen_resched_irq, cpu).name = resched_name;
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;

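	/*
	 * The debug interrupt handler dumps 2-level event channel
	 * state, so it is only useful (and only bound) when the FIFO
	 * event channel ABI is not in use.
	 */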
	if (!xen_fifo_events) {
		debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
		per_cpu(xen_debug_irq, cpu).name = debug_name;
		rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
					     xen_debug_interrupt,
					     IRQF_PERCPU | IRQF_NOBALANCING,
					     debug_name, NULL);
		if (rc < 0)
			goto fail;
		per_cpu(xen_debug_irq, cpu).irq = rc;
	}

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}

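/*
 * HVM guests finish SMP bringup via the native path; PV guests only
 * need the maximum logical package count computed here.
 */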
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
	if (xen_hvm_domain())
		native_smp_cpus_done(max_cpus);
	else
		calculate_max_logical_packages();
}

void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

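/*
 * Deliver an already-mapped Xen IPI vector to every online CPU
 * in @mask.
 */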
static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

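/*
 * Kick the cross-call IPI. If a target vCPU is currently preempted by
 * the hypervisor (its time is being "stolen"), yield once so it can
 * be scheduled to process the call promptly.
 */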
void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

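/*
 * Translate a native x86 IPI vector number into the corresponding
 * Xen IPI event, or -1 if no mapping exists.
 */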
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI:	/* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

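/*
 * The senders below take native vector numbers and translate them
 * through xen_map_vector() before delivery; unmapped vectors are
 * silently dropped after xen_map_vector() has logged the error.
 */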
void xen_send_IPI_mask(const struct cpumask *mask,
		       int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

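/*
 * As xen_send_IPI_mask(), but skip the sending CPU; a no-op on a
 * single-CPU system or for an unmapped vector.
 */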
void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				  int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (num_online_cpus() <= 1 || xen_vector < 0)
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

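/*
 * Handlers for the cross-call IPIs: run the queued SMP function calls
 * and account the interrupt.
 */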
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);

	return IRQ_HANDLED;
}