// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas IRQC Driver
 *
 * Copyright (C) 2013 Magnus Damm
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#define IRQC_IRQ_MAX	32	/* maximum 32 interrupts per driver instance */

#define IRQC_REQ_STS	0x00	/* Interrupt Request Status Register */
#define IRQC_EN_STS	0x04	/* Interrupt Enable Status Register */
#define IRQC_EN_SET	0x08	/* Interrupt Enable Set Register */
#define IRQC_INT_CPU_BASE(n)	(0x000 + ((n) * 0x10))
				/* SYS-CPU vs. RT-CPU */
#define DETECT_STATUS	0x100	/* IRQn Detect Status Register */
#define MONITOR		0x104	/* IRQn Signal Level Monitor Register */
#define HLVL_STS	0x108	/* IRQn High Level Detect Status Register */
#define LLVL_STS	0x10c	/* IRQn Low Level Detect Status Register */
#define S_R_EDGE_STS	0x110	/* IRQn Sync Rising Edge Detect Status Reg. */
#define S_F_EDGE_STS	0x114	/* IRQn Sync Falling Edge Detect Status Reg. */
#define A_R_EDGE_STS	0x118	/* IRQn Async Rising Edge Detect Status Reg. */
#define A_F_EDGE_STS	0x11c	/* IRQn Async Falling Edge Detect Status Reg. */
#define CHTEN_STS	0x120	/* Chattering Reduction Status Register */
#define IRQC_CONFIG(n)	(0x180 + ((n) * 0x04))
				/* IRQn Configuration Register */

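/*
 * Per-interrupt state: the hardware IRQ index within this controller
 * instance, the Linux IRQ number obtained from the platform device, and
 * a back-pointer to the owning driver instance.
 */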
struct irqc_irq {
	int hw_irq;
	int requested_irq;
	struct irqc_priv *p;
};

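/*
 * Per-instance driver state: register window, per-CPU interrupt bank,
 * per-IRQ bookkeeping, the generic irq chip and domain, and a counter of
 * interrupts currently configured as wakeup sources.
 */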
struct irqc_priv {
	void __iomem *iomem;
	void __iomem *cpu_int_base;
	struct irqc_irq irq[IRQC_IRQ_MAX];
	unsigned int number_of_irqs;
	struct device *dev;
	struct irq_chip_generic *gc;
	struct irq_domain *irq_domain;
	atomic_t wakeup_path;
};

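/* The domain's host_data is the driver instance, see irq_domain_add_linear() */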
static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
{
	return data->domain->host_data;
}

static void irqc_dbg(struct irqc_irq *i, char *str)
{
	dev_dbg(i->p->dev, "%s (%d:%d)\n", str, i->requested_irq, i->hw_irq);
}

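/*
 * Map Linux trigger types to the sense encoding programmed into the low
 * bits of IRQC_CONFIG(n); a zero entry marks an unsupported trigger type.
 */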
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_LEVEL_LOW]	= 0x01,
	[IRQ_TYPE_LEVEL_HIGH]	= 0x02,
	[IRQ_TYPE_EDGE_FALLING]	= 0x04,	/* Synchronous */
	[IRQ_TYPE_EDGE_RISING]	= 0x08,	/* Synchronous */
	[IRQ_TYPE_EDGE_BOTH]	= 0x0c,	/* Synchronous */
};

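/* Program the detection sense for one IRQ line via its CONFIG register */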
static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);
	unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
	u32 tmp;

	irqc_dbg(&p->irq[hw_irq], "sense");

	if (!value)
		return -EINVAL;

	tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
	tmp &= ~0x3f;
	tmp |= value;
	iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
	return 0;
}

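/*
 * Forward wake configuration to the parent (requested) interrupt and keep
 * count of active wakeup sources so irqc_suspend() can flag the wakeup path.
 */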
static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);

	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
	if (on)
		atomic_inc(&p->wakeup_path);
	else
		atomic_dec(&p->wakeup_path);

	return 0;
}

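/*
 * One handler per parent interrupt: check and clear the detect status bit
 * for this line, then hand the event to the Linux IRQ mapped in our domain.
 */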
static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
{
	struct irqc_irq *i = dev_id;
	struct irqc_priv *p = i->p;
	u32 bit = BIT(i->hw_irq);

	irqc_dbg(i, "demux1");

	if (ioread32(p->iomem + DETECT_STATUS) & bit) {
		iowrite32(bit, p->iomem + DETECT_STATUS);
		irqc_dbg(i, "demux2");
		generic_handle_domain_irq(p->irq_domain, i->hw_irq);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

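/*
 * Probe: count the parent interrupts provided by the platform, map the
 * register window, create a linear irq domain with one generic chip, and
 * request each parent interrupt with its own handler.
 */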
static int irqc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const char *name = dev_name(dev);
	struct irqc_priv *p;
	int ret;
	int k;

	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->dev = dev;
	platform_set_drvdata(pdev, p);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
	for (k = 0; k < IRQC_IRQ_MAX; k++) {
		ret = platform_get_irq_optional(pdev, k);
		if (ret == -ENXIO)
			break;
		if (ret < 0)
			goto err_runtime_pm_disable;

		p->irq[k].p = p;
		p->irq[k].hw_irq = k;
		p->irq[k].requested_irq = ret;
	}

	p->number_of_irqs = k;
	if (p->number_of_irqs < 1) {
		dev_err(dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err_runtime_pm_disable;
	}

	/* ioremap IOMEM and setup read/write callbacks */
	p->iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(p->iomem)) {
		ret = PTR_ERR(p->iomem);
		goto err_runtime_pm_disable;
	}

	p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */

	p->irq_domain = irq_domain_add_linear(dev->of_node, p->number_of_irqs,
					      &irq_generic_chip_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(dev, "cannot initialize irq domain\n");
		goto err_runtime_pm_disable;
	}

	ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
					     1, "irqc", handle_level_irq,
					     0, 0, IRQ_GC_INIT_NESTED_LOCK);
	if (ret) {
		dev_err(dev, "cannot allocate generic chip\n");
		goto err_remove_domain;
	}

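	/*
	 * Wire the generic chip to the SYS-CPU bank: IRQC_EN_SET acts as the
	 * unmask (enable) register and IRQC_EN_STS as the mask (disable)
	 * register; non-wake interrupts are masked on suspend.
	 */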
	p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
	p->gc->reg_base = p->cpu_int_base;
	p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
	p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
	p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
	p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
	p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type;
	p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake;
	p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;

	irq_domain_set_pm_device(p->irq_domain, dev);

	/* request interrupts one by one */
	for (k = 0; k < p->number_of_irqs; k++) {
		if (devm_request_irq(dev, p->irq[k].requested_irq,
				     irqc_irq_handler, 0, name, &p->irq[k])) {
			dev_err(dev, "failed to request IRQ\n");
			ret = -ENOENT;
			goto err_remove_domain;
		}
	}

	dev_info(dev, "driving %d irqs\n", p->number_of_irqs);

	return 0;

err_remove_domain:
	irq_domain_remove(p->irq_domain);
err_runtime_pm_disable:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	return ret;
}

static int irqc_remove(struct platform_device *pdev)
{
	struct irqc_priv *p = platform_get_drvdata(pdev);

	irq_domain_remove(p->irq_domain);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

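/*
 * If any IRQ was configured as a wakeup source, mark the device as part of
 * the wakeup path so it is kept powered across system suspend.
 */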
static int __maybe_unused irqc_suspend(struct device *dev)
{
	struct irqc_priv *p = dev_get_drvdata(dev);

	if (atomic_read(&p->wakeup_path))
		device_set_wakeup_path(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(irqc_pm_ops, irqc_suspend, NULL);

static const struct of_device_id irqc_dt_ids[] = {
	{ .compatible = "renesas,irqc", },
	{},
};
MODULE_DEVICE_TABLE(of, irqc_dt_ids);

static struct platform_driver irqc_device_driver = {
	.probe		= irqc_probe,
	.remove		= irqc_remove,
	.driver		= {
		.name		= "renesas_irqc",
		.of_match_table	= irqc_dt_ids,
		.pm		= &irqc_pm_ops,
	}
};

static int __init irqc_init(void)
{
	return platform_driver_register(&irqc_device_driver);
}
postcore_initcall(irqc_init);

static void __exit irqc_exit(void)
{
	platform_driver_unregister(&irqc_device_driver);
}
module_exit(irqc_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas IRQC Driver");
MODULE_LICENSE("GPL v2");