/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *		PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *		Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/random.h>	/* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif
#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);
/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};

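/*
 * Returns 1 if the irq has been allocated (IRQ_USED), -1 otherwise.
 */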
int check_irq_used(int irq)
{
	if (irq_status[irq] == IRQ_USED)
		return 1;

	return -1;
}

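/*
 * Find the first unused irq number at or above IA64_FIRST_DEVICE_VECTOR,
 * or return -ENOSPC if they are all taken.
 */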
static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

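/*
 * Find a device vector that is still free on every CPU of @domain.
 * Returns -EINVAL if no CPU in @domain is online, -ENOSPC if all
 * device vectors are already in use somewhere in the domain.
 */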
static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}

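/*
 * Record the irq <-> vector mapping for every online CPU in @domain:
 * update the per-CPU vector_irq tables, the irq_cfg entry, and the
 * global vector_table.  Caller must hold vector_lock.
 */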
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

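/*
 * Undo __bind_irq_vector(): invalidate the per-CPU vector_irq entries
 * and release the vector from vector_table.  Caller must hold
 * vector_lock.
 */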
static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

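/*
 * Allocate a free device vector for @irq, trying each online CPU's
 * allocation domain in turn.  If @irq is AUTO_ASSIGN, the vector
 * number itself is used as the irq number.  Returns the vector, or
 * -ENOSPC if none is available.
 */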
int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

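/*
 * Reserve @vector on all CPUs so that it cannot be handed out to a
 * device later.  Returns 0 on success, non-zero otherwise.
 */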
int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the in-use vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

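/*
 * The set of CPUs a new vector may be allocated on: only @cpu itself
 * in "percpu" vector domain mode, all CPUs otherwise.
 */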
static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return cpumask_of_cpu(cpu);
	return CPU_MASK_ALL;
}

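/*
 * Prepare to migrate @irq towards @cpu: allocate a new vector in
 * @cpu's allocation domain, remember the old domain in cfg->old_domain,
 * and flag the move so irq_complete_move() can clean up the old
 * vector once the irq fires in the new domain.
 */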
static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpu_isset(cpu, cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

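/*
 * Called from the irq path: once the moved irq is no longer being
 * delivered to a CPU of the old domain, IPI every online CPU of the
 * old domain with IA64_IRQ_MOVE_VECTOR so each releases its stale
 * vector.
 */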
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
		return;

	cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}

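/*
 * IA64_IRQ_MOVE_VECTOR handler: scan this CPU's vector_irq table and
 * free any vector that still belongs to the old domain of a moved irq.
 */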
static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpu_isset(me, cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__get_cpu_var(vector_irq)[vector] = -1;
		cpu_clear(me, vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

static struct irqaction irq_move_irqaction = {
	.handler =	smp_irq_move_cleanup_interrupt,
	.flags =	IRQF_DISABLED,
	.name =		"irq_move"
};

static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif


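/*
 * Tear down a dynamically allocated irq and mark it reserved so that
 * it will not be handed out again.
 */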
void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic irq allocation and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		dynamic_irq_init(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
	clear_irq_vector(irq);
}

#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * This is where the IVT branches when we get an external interrupt.
 * It then dispatches to the correct hardware IRQ handler via a
 * function pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=0x%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard
	 * softirq handler needs to be able to wait for further keyboard
	 * interrupts, which can't come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a CPU is about to
 * be brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call the irq handler as it would have been
			 * invoked from a real interrupt handler, passing
			 * NULL for pt_regs.  This code could probably be
			 * shared with ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif


#ifdef CONFIG_SMP

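/*
 * The reschedule and TLB-flush IPIs are consumed directly in
 * ia64_handle_irq(), so their registered handler must never run.
 */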
static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
	return IRQ_NONE;	/* not reached; keeps the compiler happy */
}

static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.flags =	IRQF_DISABLED,
	.name =		"IPI"
};

/*
 * KVM uses this interrupt to force a cpu out of guest mode
 */
static struct irqaction resched_irqaction = {
	.handler =	dummy_handler,
	.flags =	IRQF_DISABLED,
	.name =		"resched"
};

static struct irqaction tlb_irqaction = {
	.handler =	dummy_handler,
	.flags =	IRQF_DISABLED,
	.name =		"tlb_flush"
};

#endif

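/*
 * Register a per-CPU irq whose irq number equals its vector: bind it
 * on all CPUs, wire it to the LSAPIC chip, and handle it with
 * handle_percpu_irq().
 */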
void
ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (action)
		setup_irq(irq, action);
	irq_set_handler(irq, handle_percpu_irq);
}

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
#endif
}

void __init
init_IRQ (void)
{
#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
	if (vector_domain_type != VECTOR_DOMAIN_NONE)
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
#endif
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}

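/*
 * Send an inter-processor interrupt: write the (delivery mode, vector)
 * pair to the target CPU's address in the processor interrupt block,
 * optionally setting the redirect bit.
 */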
void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * The physical cpu number is encoded as an 8-bit ID and an
	 * 8-bit EID.
	 */
	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}