// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
 *  Loongson Local IO Interrupt Controller support
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/irqchip/chained_irq.h>

#ifdef CONFIG_MIPS
#include <loongson.h>
#else
#include <asm/loongson.h>
#endif

#define LIOINTC_CHIP_IRQ	32
#define LIOINTC_NUM_PARENT	4
#define LIOINTC_NUM_CORES	4

#define LIOINTC_INTC_CHIP_START	0x20

#define LIOINTC_REG_INTC_STATUS	(LIOINTC_INTC_CHIP_START + 0x20)
#define LIOINTC_REG_INTC_EN_STATUS	(LIOINTC_INTC_CHIP_START + 0x04)
#define LIOINTC_REG_INTC_ENABLE	(LIOINTC_INTC_CHIP_START + 0x08)
#define LIOINTC_REG_INTC_DISABLE	(LIOINTC_INTC_CHIP_START + 0x0c)
#define LIOINTC_REG_INTC_POL	(LIOINTC_INTC_CHIP_START + 0x10)
#define LIOINTC_REG_INTC_EDGE	(LIOINTC_INTC_CHIP_START + 0x14)

#define LIOINTC_SHIFT_INTx	4

#define LIOINTC_ERRATA_IRQ	10

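/* Current core number: EBase.CPUNum on MIPS, CSR CPUID on LoongArch */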
#if defined(CONFIG_MIPS)
#define liointc_core_id get_ebase_cpunum()
#else
#define liointc_core_id get_csr_cpuid()
#endif

struct liointc_handler_data {
	struct liointc_priv	*priv;
	u32			parent_int_map;
};

struct liointc_priv {
	struct irq_chip_generic		*gc;
	struct liointc_handler_data	handler[LIOINTC_NUM_PARENT];
	void __iomem			*core_isr[LIOINTC_NUM_CORES];
	u8				map_cache[LIOINTC_CHIP_IRQ];
	bool				has_lpc_irq_errata;
};

struct fwnode_handle *liointc_handle;

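/*
 * Chained handler for one parent INTx input: read this core's ISR and
 * dispatch every pending bit into the liointc IRQ domain.
 */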
static void liointc_chained_handle_irq(struct irq_desc *desc)
{
	struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_chip_generic *gc = handler->priv->gc;
	int core = liointc_core_id % LIOINTC_NUM_CORES;
	u32 pending;

	chained_irq_enter(chip, desc);

	pending = readl(handler->priv->core_isr[core]);

	if (!pending) {
		/* Always blame LPC IRQ if we have that bug */
		if (handler->priv->has_lpc_irq_errata &&
			(handler->parent_int_map & gc->mask_cache &
			BIT(LIOINTC_ERRATA_IRQ)))
			pending = BIT(LIOINTC_ERRATA_IRQ);
		else
			spurious_interrupt();
	}

	while (pending) {
		int bit = __ffs(pending);

		generic_handle_domain_irq(gc->domain, bit);
		pending &= ~BIT(bit);
	}

	chained_irq_exit(chip, desc);
}

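/*
 * Read-modify-write helper for a 32-bit control register; callers serialize
 * with the generic chip lock.
 */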
static void liointc_set_bit(struct irq_chip_generic *gc,
				unsigned int offset,
				u32 mask, bool set)
{
	if (set)
		writel(readl(gc->reg_base + offset) | mask,
				gc->reg_base + offset);
	else
		writel(readl(gc->reg_base + offset) & ~mask,
				gc->reg_base + offset);
}

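/*
 * irq_set_type callback: the EDGE register selects level vs. edge trigger,
 * the POL register selects the active polarity.
 */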
static int liointc_set_type(struct irq_data *data, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	u32 mask = data->mask;
	unsigned long flags;

	irq_gc_lock_irqsave(gc, flags);
	switch (type) {
	case IRQ_TYPE_LEVEL_HIGH:
		liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
		break;
	case IRQ_TYPE_LEVEL_LOW:
		liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
		break;
	case IRQ_TYPE_EDGE_RISING:
		liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
		break;
	case IRQ_TYPE_EDGE_FALLING:
		liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
		liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
		break;
	default:
		irq_gc_unlock_irqrestore(gc, flags);
		return -EINVAL;
	}
	irq_gc_unlock_irqrestore(gc, flags);

	irqd_set_trigger_type(data, type);
	return 0;
}

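/* Resume hook: reprogram routing and re-enable IRQs from the cached state */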
static void liointc_resume(struct irq_chip_generic *gc)
{
	struct liointc_priv *priv = gc->private;
	unsigned long flags;
	int i;

	irq_gc_lock_irqsave(gc, flags);
	/* Disable all at first */
	writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE);
	/* Restore map cache */
	for (i = 0; i < LIOINTC_CHIP_IRQ; i++)
		writeb(priv->map_cache[i], gc->reg_base + i);
	/* Restore mask cache */
	writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
	irq_gc_unlock_irqrestore(gc, flags);
}

static int parent_irq[LIOINTC_NUM_PARENT];
static u32 parent_int_map[LIOINTC_NUM_PARENT];
static const char *const parent_names[] = {"int0", "int1", "int2", "int3"};
static const char *const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"};

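/*
 * ACPI/GSI translation: hwirq is the GSI number minus GSI_MIN_CPU_IRQ; the
 * trigger type is left for irq_set_type to configure.
 */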
static int liointc_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0] - GSI_MIN_CPU_IRQ;
	*out_type = IRQ_TYPE_NONE;
	return 0;
}

static const struct irq_domain_ops acpi_irq_gc_ops = {
	.map	= irq_map_generic_chip,
	.unmap  = irq_unmap_generic_chip,
	.xlate	= liointc_domain_xlate,
};

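/* Common init path shared by the DT and ACPI probe routines */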
static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
		struct fwnode_handle *domain_handle, struct device_node *node)
{
	int i, err;
	void __iomem *base;
	struct irq_chip_type *ct;
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	struct liointc_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	base = ioremap(addr, size);
	if (!base)
		goto out_free_priv;

	for (i = 0; i < LIOINTC_NUM_CORES; i++)
		priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;

	for (i = 0; i < LIOINTC_NUM_PARENT; i++)
		priv->handler[i].parent_int_map = parent_int_map[i];

	if (revision > 1) {
		for (i = 0; i < LIOINTC_NUM_CORES; i++) {
			int index = of_property_match_string(node,
					"reg-names", core_reg_names[i]);

			if (index < 0)
				continue;

			priv->core_isr[i] = of_iomap(node, index);
		}

		if (!priv->core_isr[0])
			goto out_iounmap;
	}

	/* Setup IRQ domain */
	if (!acpi_disabled)
		domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
					&acpi_irq_gc_ops, priv);
	else
		domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
					&irq_generic_chip_ops, priv);
	if (!domain) {
		pr_err("loongson-liointc: cannot add IRQ domain\n");
		goto out_iounmap;
	}

	err = irq_alloc_domain_generic_chips(domain, LIOINTC_CHIP_IRQ, 1,
					(node ? node->full_name : "LIOINTC"),
					handle_level_irq, 0, IRQ_NOPROBE, 0);
	if (err) {
		pr_err("loongson-liointc: unable to register IRQ domain\n");
		goto out_free_domain;
	}

	/* Disable all IRQs */
	writel(0xffffffff, base + LIOINTC_REG_INTC_DISABLE);
	/* Set to level triggered */
	writel(0x0, base + LIOINTC_REG_INTC_EDGE);

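	/*
	 * Each per-input route byte (offset 0..31) carries the target core
	 * mask in its low nibble and the parent INTx selection in its high
	 * nibble (LIOINTC_SHIFT_INTx).
	 */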
	/* Generate parent INT part of map cache */
	for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
		u32 pending = priv->handler[i].parent_int_map;

		while (pending) {
			int bit = __ffs(pending);

			priv->map_cache[bit] = BIT(i) << LIOINTC_SHIFT_INTx;
			pending &= ~BIT(bit);
		}
	}

	for (i = 0; i < LIOINTC_CHIP_IRQ; i++) {
		/* Generate core part of map cache */
		priv->map_cache[i] |= BIT(loongson_sysconf.boot_cpu_id);
		writeb(priv->map_cache[i], base + i);
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->private = priv;
	gc->reg_base = base;
	gc->domain = domain;
	gc->resume = liointc_resume;

	ct = gc->chip_types;
	ct->regs.enable = LIOINTC_REG_INTC_ENABLE;
	ct->regs.disable = LIOINTC_REG_INTC_DISABLE;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->chip.irq_mask = irq_gc_mask_disable_reg;
	ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
	ct->chip.irq_set_type = liointc_set_type;

	gc->mask_cache = 0;
	priv->gc = gc;

	for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
		if (parent_irq[i] <= 0)
			continue;

		priv->handler[i].priv = priv;
		irq_set_chained_handler_and_data(parent_irq[i],
				liointc_chained_handle_irq, &priv->handler[i]);
	}

	liointc_handle = domain_handle;
	return 0;

out_free_domain:
	irq_domain_remove(domain);
out_iounmap:
	iounmap(base);
out_free_priv:
	kfree(priv);

	return -EINVAL;
}

#ifdef CONFIG_OF

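/* DT probe path, shared by the liointc-1.0, -1.0a and -2.0 compatibles */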
static int __init liointc_of_init(struct device_node *node,
				  struct device_node *parent)
{
	bool have_parent = false;
	int sz, i, index, revision, err = 0;
	struct resource res;

	if (!of_device_is_compatible(node, "loongson,liointc-2.0")) {
		index = 0;
		revision = 1;
	} else {
		index = of_property_match_string(node, "reg-names", "main");
		revision = 2;
	}

	if (of_address_to_resource(node, index, &res))
		return -EINVAL;

	for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
		parent_irq[i] = of_irq_get_byname(node, parent_names[i]);
		if (parent_irq[i] > 0)
			have_parent = true;
	}
	if (!have_parent)
		return -ENODEV;

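	/*
	 * "loongson,parent_int_map" gives, for each parent INTx, the bitmap
	 * of controller inputs routed to it.
	 */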
	sz = of_property_read_variable_u32_array(node,
						"loongson,parent_int_map",
						&parent_int_map[0],
						LIOINTC_NUM_PARENT,
						LIOINTC_NUM_PARENT);
	if (sz < 4) {
		pr_err("loongson-liointc: No parent_int_map\n");
		return -ENODEV;
	}

	err = liointc_init(res.start, resource_size(&res),
			revision, of_node_to_fwnode(node), node);
	if (err < 0)
		return err;

	return 0;
}

IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init);
IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init);
IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init);

#endif

#ifdef CONFIG_ACPI
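/* ACPI probe path: wire up the two cascade inputs described by the MADT LIO PIC entry */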
int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic *acpi_liointc)
{
	int ret;
	struct fwnode_handle *domain_handle;

	parent_int_map[0] = acpi_liointc->cascade_map[0];
	parent_int_map[1] = acpi_liointc->cascade_map[1];

	parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
	parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);

	domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
	if (!domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		return -ENOMEM;
	}
	ret = liointc_init(acpi_liointc->address, acpi_liointc->size,
			   1, domain_handle, NULL);
	if (ret)
		irq_domain_free_fwnode(domain_handle);

	return ret;
}
#endif