// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/kmemleak.h>

#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"

static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
bool xive_has_save_restore;

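/*
 * Fill an xive_irq_data with the characteristics of a HW interrupt
 * queried from OPAL and map its ESB pages. The EOI page is always
 * mapped; the trigger page only when it exists and is distinct from
 * the EOI page.
 */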
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI2)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	if (!data->trig_page)
		return 0;
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);

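/*
 * The OPAL calls below follow the usual firmware convention: retry as
 * long as OPAL_BUSY is returned, sleeping OPAL_BUSY_DELAY_MS between
 * attempts.
 */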
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);

static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				      u32 *sw_irq)
{
	s64 rc;
	__be64 vp;
	__be32 lirq;

	rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);

	*target = be64_to_cpu(vp);
	*sw_irq = be32_to_cpu(lirq);

	return rc == 0 ? 0 : -ENXIO;
}

#define vp_err(vp, fmt, ...) pr_err("VP[0x%x]: " fmt, vp, ##__VA_ARGS__)

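/*
 * Minimal usage sketch (error handling elided), mirroring what
 * xive_native_setup_queue() below does for a CPU queue:
 *
 *	__be32 *qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
 *
 *	xive_native_configure_queue(get_hard_smp_processor_id(cpu),
 *				    q, prio, qpage, xive_queue_shift,
 *				    false);
 */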
/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		vp_err(vp_id, "Failed to get queue %d info : %lld\n", prio, rc);
		rc = -EIO;
		goto fail;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed ? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		vp_err(vp_id, "Failed to set queue %d info: %lld\n", prio, rc);
		rc = -EIO;
	} else {
		/*
		 * KVM code requires all of the above to be visible before
		 * q->qpage is set due to how it manages IPI EOIs
		 */
		wmb();
		q->qpage = qpage;
	}
fail:
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		vp_err(vp_id, "Failed to disable queue %d : %lld\n", prio, rc);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);

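/* Allocate the queue page for one priority of a CPU and configure it in HW */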
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * We use the variant with no iounmap as this is called on exec
	 * from an IPI and iounmap isn't safe
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

static s64 opal_xive_allocate_irq(u32 chip_id)
{
	s64 irq = opal_xive_allocate_irq_raw(chip_id);

	/*
	 * Old versions of skiboot can incorrectly return 0xffffffff to
	 * indicate no space, fix it up here.
	 */
	return irq == 0xffffffff ? OPAL_RESOURCE : irq;
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 irq;

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(xc->chip_id);
		if (irq == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}
	return 0;
}
#endif /* CONFIG_SMP */

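/* Returns the allocated HW irq number, or 0 on failure */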
u32 xive_native_alloc_irq_on_chip(u32 chip_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(chip_id);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip);

void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		xc->hw_ipi = XIVE_BAD_IRQ;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor to register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
	cppr = ack & 0xff;
	he = (ack >> 8) >> 6;
	switch (he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI: /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		return;
	}
}

static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	xc->chip_id = cpu_to_chip_id(cpu);
}

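/*
 * Attach this CPU's pool VP: pull any stale pool context, enable the
 * VP in OPAL, then push its CAM line into the thread management area.
 */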
static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Check if pool VP already active, if it is, pull it */
	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}

void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

void xive_native_sync_queue(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);

#ifdef CONFIG_DEBUG_FS
static int xive_native_debug_create(struct dentry *xive_dir)
{
	debugfs_create_bool("save-restore", 0600, xive_dir, &xive_has_save_restore);
	return 0;
}
#endif

static const struct xive_ops xive_native_ops = {
	.populate_irq_data	= xive_native_populate_irq_data,
	.configure_irq		= xive_native_configure_irq,
	.get_irq_config		= xive_native_get_irq_config,
	.setup_queue		= xive_native_setup_queue,
	.cleanup_queue		= xive_native_cleanup_queue,
	.match			= xive_native_match,
	.shutdown		= xive_native_shutdown,
	.update_pending		= xive_native_update_pending,
	.prepare_cpu		= xive_native_prepare_cpu,
	.setup_cpu		= xive_native_setup_cpu,
	.teardown_cpu		= xive_native_teardown_cpu,
	.sync_source		= xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi		= xive_native_get_ipi,
	.put_ipi		= xive_native_put_ipi,
#endif /* CONFIG_SMP */
#ifdef CONFIG_DEBUG_FS
	.debug_create		= xive_native_debug_create,
#endif /* CONFIG_DEBUG_FS */
	.name			= "native",
};

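/*
 * Parse the provisioning parameters from the device-tree properties
 * "ibm,xive-provision-page-size" and "ibm,xive-provision-chips".
 * Returns true when no provisioning is needed or setup succeeded.
 */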
static bool __init xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

	xive_provision_chips = kcalloc(4, xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;

	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}

static void __init xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

	pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);

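/*
 * Probe the "ibm,opal-xive-pe" node, map the TIMA, switch the
 * controller to exploitation mode and register this backend with the
 * XIVE core.
 */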
bool __init xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val, cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %pOF\n", np);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		goto err_put;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		goto err_put;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Do we support single escalation */
	if (of_get_property(np, "single-escalation-support", NULL) != NULL)
		xive_has_single_esc = true;

	if (of_get_property(np, "vp-save-restore", NULL))
		xive_has_save_restore = true;

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Resource 2 is OS window */
	if (of_address_to_resource(np, 2, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		goto err_put;
	}

	xive_tima_os = r.start;

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		goto err_put;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(np, &xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		goto err_put;
	}
	of_node_put(np);
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;

err_put:
	of_node_put(np);
	return false;
}

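/* Donate one page per provisioning chip when OPAL requests more */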
static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node where
		 * the chip resides.
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		kmemleak_ignore(p);
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}

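/*
 * Allocate a power-of-two block of VPs large enough for max_vcpus,
 * e.g. max_vcpus == 9 rounds up to order 4 (a block of 16 VPs).
 */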
u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;

	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
		 max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(OPAL_BUSY_DELAY_MS);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);

void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
	s64 rc;
	u64 flags = OPAL_XIVE_VP_ENABLED;

	if (single_escalation)
		flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, flags, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		vp_err(vp_id, "Failed to enable VP : %lld\n", rc);
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		vp_err(vp_id, "Failed to disable VP : %lld\n", rc);
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc) {
		vp_err(vp_id, "Failed to get VP info : %lld\n", rc);
		return -EIO;
	}
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
	return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);

bool xive_native_has_save_restore(void)
{
	return xive_has_save_restore;
}
EXPORT_SYMBOL_GPL(xive_native_has_save_restore);

int xive_native_get_queue_info(u32 vp_id, u32 prio,
			       u64 *out_qpage,
			       u64 *out_qsize,
			       u64 *out_qeoi_page,
			       u32 *out_escalate_irq,
			       u64 *out_qflags)
{
	__be64 qpage;
	__be64 qsize;
	__be64 qeoi_page;
	__be32 escalate_irq;
	__be64 qflags;
	s64 rc;

	rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
				      &qeoi_page, &escalate_irq, &qflags);
	if (rc) {
		vp_err(vp_id, "failed to get queue %d info : %lld\n", prio, rc);
		return -EIO;
	}

	if (out_qpage)
		*out_qpage = be64_to_cpu(qpage);
	if (out_qsize)
		*out_qsize = be64_to_cpu(qsize);	/* qsize is __be64 */
	if (out_qeoi_page)
		*out_qeoi_page = be64_to_cpu(qeoi_page);
	if (out_escalate_irq)
		*out_escalate_irq = be32_to_cpu(escalate_irq);
	if (out_qflags)
		*out_qflags = be64_to_cpu(qflags);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);

int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
	__be32 opal_qtoggle;
	__be32 opal_qindex;
	s64 rc;

	rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
				       &opal_qindex);
	if (rc) {
		vp_err(vp_id, "failed to get queue %d state : %lld\n", prio, rc);
		return -EIO;
	}

	if (qtoggle)
		*qtoggle = be32_to_cpu(opal_qtoggle);
	if (qindex)
		*qindex = be32_to_cpu(opal_qindex);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);

int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
	s64 rc;

	rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
	if (rc) {
		vp_err(vp_id, "failed to set queue %d state : %lld\n", prio, rc);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);

bool xive_native_has_queue_state_support(void)
{
	return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
	       opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
}
EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);

int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
	__be64 state;
	s64 rc;

	rc = opal_xive_get_vp_state(vp_id, &state);
	if (rc) {
		vp_err(vp_id, "failed to get vp state : %lld\n", rc);
		return -EIO;
	}

	if (out_state)
		*out_state = be64_to_cpu(state);
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);

machine_arch_initcall(powernv, xive_core_debug_init);