// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Interrupt management for most GSC and related devices.
 *
 * (c) Copyright 1999 Alex deVries for The Puffin Group
 * (c) Copyright 1999 Grant Grundler for Hewlett-Packard
 * (c) Copyright 1999 Matthew Wilcox
 * (c) Copyright 2000 Helge Deller
 * (c) Copyright 2001 Matthew Wilcox for Hewlett-Packard
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/types.h>

#include <asm/hardware.h>
#include <asm/io.h>

#include "gsc.h"

#undef DEBUG

#ifdef DEBUG
#define DEBPRINTK printk
#else
#define DEBPRINTK(x,...)
#endif

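/*
 * Allocate a free CPU interrupt transaction for a GSC chip and record
 * the transaction address and data the chip must use to raise it.
 * Returns the Linux IRQ number, or a negative value on failure.
 */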
int gsc_alloc_irq(struct gsc_irq *i)
{
	int irq = txn_alloc_irq(GSC_EIM_WIDTH);
	if (irq < 0) {
		printk("cannot get irq\n");
		return irq;
	}

	i->txn_addr = txn_alloc_addr(irq);
	i->txn_data = txn_alloc_data(irq);
	i->irq = irq;

	return irq;
}

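/*
 * Like gsc_alloc_irq(), but claim one specific CPU interrupt line
 * (given as an offset from CPU_IRQ_BASE) rather than allocating the
 * next free one.
 */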
int gsc_claim_irq(struct gsc_irq *i, int irq)
{
	int c = irq;

	irq += CPU_IRQ_BASE; /* virtualize the IRQ first */

	irq = txn_claim_irq(irq);
	if (irq < 0) {
		printk("cannot claim irq %d\n", c);
		return irq;
	}

	i->txn_addr = txn_alloc_addr(irq);
	i->txn_data = txn_alloc_data(irq);
	i->irq = irq;

	return irq;
}

EXPORT_SYMBOL(gsc_alloc_irq);
EXPORT_SYMBOL(gsc_claim_irq);
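/*
 * How the two helpers above are typically consumed (a sketch, loosely
 * modelled on the Lasi/Asp/Wax drivers; "asic" stands for the driver's
 * struct gsc_asic instance):
 *
 *	struct gsc_irq gsc_irq;
 *	int irq = gsc_alloc_irq(&gsc_irq);
 *
 *	asic->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
 *	request_irq(irq, gsc_asic_intr, 0, asic->name, asic);
 *	gsc_writel(asic->eim, asic->hpa + OFFSET_IAR);
 *
 * The ASIC then funnels its (up to 32) local interrupt lines through this
 * single CPU interrupt, which gsc_asic_intr() below demultiplexes.
 */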

/* Common interrupt demultiplexer used by Asp, Lasi & Wax.  */
irqreturn_t gsc_asic_intr(int gsc_asic_irq, void *dev)
{
	unsigned long irr;
	struct gsc_asic *gsc_asic = dev;

	irr = gsc_readl(gsc_asic->hpa + OFFSET_IRR);
	if (irr == 0)
		return IRQ_NONE;

	DEBPRINTK("%s intr, irr=0x%lx\n", gsc_asic->name, irr);

	do {
		int local_irq = __ffs(irr);
		unsigned int irq = gsc_asic->global_irq[local_irq];
		generic_handle_irq(irq);
		irr &= ~(1 << local_irq);
	} while (irr);

	return IRQ_HANDLED;
}

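/*
 * Map a Linux IRQ number back to the local line (bit position in the
 * ASIC's IRR/IMR) it was assigned to, by searching the global_irqs[]
 * table.  Returns NO_IRQ if the IRQ does not belong to this ASIC.
 */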
int gsc_find_local_irq(unsigned int irq, int *global_irqs, int limit)
{
	int local_irq;

	for (local_irq = 0; local_irq < limit; local_irq++) {
		if (global_irqs[local_irq] == irq)
			return local_irq;
	}

	return NO_IRQ;
}

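/*
 * Mask/unmask a single line by clearing/setting its bit in the ASIC's
 * Interrupt Mask Register (IMR).
 */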
static void gsc_asic_mask_irq(struct irq_data *d)
{
	struct gsc_asic *irq_dev = irq_data_get_irq_chip_data(d);
	int local_irq = gsc_find_local_irq(d->irq, irq_dev->global_irq, 32);
	u32 imr;

	/* Disable the IRQ line by clearing the bit in the IMR */
	imr = gsc_readl(irq_dev->hpa + OFFSET_IMR);

	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, d->irq,
			irq_dev->name, imr);

	imr &= ~(1 << local_irq);
	gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);
}

static void gsc_asic_unmask_irq(struct irq_data *d)
{
	struct gsc_asic *irq_dev = irq_data_get_irq_chip_data(d);
	int local_irq = gsc_find_local_irq(d->irq, irq_dev->global_irq, 32);
	u32 imr;

	/* Enable the IRQ line by setting the bit in the IMR */
	imr = gsc_readl(irq_dev->hpa + OFFSET_IMR);

	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, d->irq,
			irq_dev->name, imr);

	imr |= 1 << local_irq;
	gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);
	/*
	 * FIXME: read IPR to make sure the IRQ isn't already pending.
	 *   If so, we need to read IRR and manually call do_irq().
	 */
}

#ifdef CONFIG_SMP
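/*
 * Re-target the ASIC's interrupt transaction at a different CPU:
 * recompute the EIM value from the new CPU's transaction address and
 * rewrite the IAR so that subsequent interrupts from devices behind
 * this ASIC are delivered to that CPU.
 */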
static int gsc_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
				bool force)
{
	struct gsc_asic *gsc_dev = irq_data_get_irq_chip_data(d);
	struct cpumask tmask;
	int cpu_irq;

	if (!cpumask_and(&tmask, dest, cpu_online_mask))
		return -EINVAL;

	cpu_irq = cpu_check_affinity(d, &tmask);
	if (cpu_irq < 0)
		return cpu_irq;

	gsc_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq);
	gsc_dev->eim = ((u32) gsc_dev->gsc_irq.txn_addr) | gsc_dev->gsc_irq.txn_data;

	/* switch IRQs for devices below LASI/WAX to another CPU */
	gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR);

	irq_data_update_effective_affinity(d, &tmask);

	return IRQ_SET_MASK_OK;
}
#endif


static struct irq_chip gsc_asic_interrupt_type = {
	.name		=	"GSC-ASIC",
	.irq_unmask	=	gsc_asic_unmask_irq,
	.irq_mask	=	gsc_asic_mask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity =	gsc_set_affinity_irq,
#endif
};

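/*
 * Hand out the next free Linux IRQ number in the GSC range
 * (GSC_IRQ_BASE..GSC_IRQ_MAX) and install the given irq_chip and chip
 * data for it.  Returns NO_IRQ once the range is exhausted.
 */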
int gsc_assign_irq(struct irq_chip *type, void *data)
{
	static int irq = GSC_IRQ_BASE;

	if (irq > GSC_IRQ_MAX)
		return NO_IRQ;

	irq_set_chip_and_handler(irq, type, handle_simple_irq);
	irq_set_chip_data(irq, data);

	return irq++;
}

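/*
 * Look up (or lazily allocate) the Linux IRQ backing one of the ASIC's
 * local interrupt lines and return it through *irqp.
 */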
void gsc_asic_assign_irq(struct gsc_asic *asic, int local_irq, int *irqp)
{
	int irq = asic->global_irq[local_irq];

	if (irq <= 0) {
		irq = gsc_assign_irq(&gsc_asic_interrupt_type, asic);
		if (irq == NO_IRQ)
			return;

		asic->global_irq[local_irq] = irq;
	}
	*irqp = irq;
}

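/*
 * Bundles the bridge driver's choose_irq() callback with its context so
 * both can be passed through device_for_each_child().
 */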
struct gsc_fixup_struct {
	void (*choose_irq)(struct parisc_device *, void *);
	void *ctrl;
};

static int gsc_fixup_irqs_callback(struct device *dev, void *data)
{
	struct parisc_device *padev = to_parisc_device(dev);
	struct gsc_fixup_struct *gf = data;

	/* work-around for 715/64 and others which have parent
	   at path [5] and children at path [5/0/x] */
	if (padev->id.hw_type == HPHW_FAULTY)
		gsc_fixup_irqs(padev, gf->ctrl, gf->choose_irq);
	gf->choose_irq(padev, gf->ctrl);

	return 0;
}

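/*
 * Walk the children of a GSC bus bridge and let the bridge driver assign
 * a Linux IRQ to each one via its choose_irq() callback.  A callback
 * typically maps the child device to a fixed local interrupt line and
 * then calls gsc_asic_assign_irq().  A rough sketch (the sversion value
 * and the name foo_choose_irq are only illustrative):
 *
 *	static void foo_choose_irq(struct parisc_device *dev, void *ctrl)
 *	{
 *		struct gsc_asic *asic = ctrl;
 *		int local_irq;
 *
 *		switch (dev->id.sversion) {
 *		case 0x74: local_irq = 7; break;
 *		default: return;
 *		}
 *		gsc_asic_assign_irq(asic, local_irq, &dev->irq);
 *	}
 */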
void gsc_fixup_irqs(struct parisc_device *parent, void *ctrl,
			void (*choose_irq)(struct parisc_device *, void *))
{
	struct gsc_fixup_struct data = {
		.choose_irq	= choose_irq,
		.ctrl		= ctrl,
	};

	device_for_each_child(&parent->dev, &data, gsc_fixup_irqs_callback);
}

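/*
 * Common probe-time setup shared by the GSC ASIC drivers: record the
 * parent device, reset the local->global IRQ mapping and reserve the
 * ASIC's MMIO region.
 */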
int gsc_common_setup(struct parisc_device *parent, struct gsc_asic *gsc_asic)
{
	struct resource *res;
	int i;

	gsc_asic->gsc = parent;

	/* Initialise local irq -> global irq mapping */
	for (i = 0; i < 32; i++) {
		gsc_asic->global_irq[i] = NO_IRQ;
	}

	/* allocate resource region */
	res = request_mem_region(gsc_asic->hpa, 0x100000, gsc_asic->name);
	if (res) {
		res->flags = IORESOURCE_MEM; 	/* do not mark it busy ! */
	}

#if 0
	printk(KERN_WARNING "%s IRQ %d EIM 0x%x", gsc_asic->name,
			parent->irq, gsc_asic->eim);
	if (gsc_readl(gsc_asic->hpa + OFFSET_IMR))
		printk("  IMR is non-zero! (0x%x)",
				gsc_readl(gsc_asic->hpa + OFFSET_IMR));
	printk("\n");
#endif

	return 0;
}