// SPDX-License-Identifier: GPL-2.0
/*
 * Library implementing the most common irq chip callback functions
 *
 * Copyright (C) 2011, Thomas Gleixner
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>

#include "internals.h"

static LIST_HEAD(gc_list);
static DEFINE_RAW_SPINLOCK(gc_lock);

/**
 * irq_gc_noop - NOOP function
 * @d: irq_data
 */
void irq_gc_noop(struct irq_data *d)
{
}
EXPORT_SYMBOL_GPL(irq_gc_noop);

/**
 * irq_gc_mask_disable_reg - Mask chip via disable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_mask_disable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.disable);
	*ct->mask_cache &= ~mask;
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_disable_reg);

/**
 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache |= mask;
	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);

/**
 * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache &= ~mask;
	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);

/**
 * irq_gc_unmask_enable_reg - Unmask chip via enable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_unmask_enable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.enable);
	*ct->mask_cache |= mask;
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_unmask_enable_reg);
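
/*
 * Usage sketch (illustrative, not taken from an in-tree driver): a chip
 * with separate enable/disable registers would typically hook the two
 * helpers above into its primary irq_chip_type. The FOO_* register
 * offsets are made-up placeholders.
 *
 *	struct irq_chip_type *ct = gc->chip_types;
 *
 *	ct->regs.enable		= FOO_INTR_ENABLE;
 *	ct->regs.disable	= FOO_INTR_DISABLE;
 *	ct->chip.irq_mask	= irq_gc_mask_disable_reg;
 *	ct->chip.irq_unmask	= irq_gc_unmask_enable_reg;
 */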

/**
 * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
 * @d: irq_data
 */
void irq_gc_ack_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);

/**
 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
 * @d: irq_data
 */
void irq_gc_ack_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = ~d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
 * @d: irq_data
 *
 * This generic implementation of the irq_mask_ack method is for chips
 * with separate enable/disable registers instead of a single mask
 * register and where a pending interrupt is acknowledged by setting a
 * bit.
 *
 * Note: This is the only permutation currently used. Similar generic
 * functions should be added here if other permutations are required.
 */
void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.disable);
	*ct->mask_cache &= ~mask;
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_eoi - EOI interrupt
 * @d: irq_data
 */
void irq_gc_eoi(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.eoi);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_set_wake - Set/clr wake bit for an interrupt
 * @d: irq_data
 * @on: Indicates whether the wake bit should be set or cleared
 *
 * For chips where the wake from suspend functionality is not
 * configured in a separate register and the wakeup active state is
 * just stored in a bitmask.
 */
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = d->mask;

	if (!(mask & gc->wake_enabled))
		return -EINVAL;

	irq_gc_lock(gc);
	if (on)
		gc->wake_active |= mask;
	else
		gc->wake_active &= ~mask;
	irq_gc_unlock(gc);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_gc_set_wake);
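
/*
 * Usage sketch (illustrative): irq_gc_set_wake() only works when the
 * driver tells the core which lines may wake the system, e.g. during
 * chip setup. The 16-bit mask below is an arbitrary example value.
 *
 *	gc->wake_enabled = IRQ_MSK(16);
 *	gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
 */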

static u32 irq_readl_be(void __iomem *addr)
{
	return ioread32be(addr);
}

static void irq_writel_be(u32 val, void __iomem *addr)
{
	iowrite32be(val, addr);
}

void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
			   int num_ct, unsigned int irq_base,
			   void __iomem *reg_base, irq_flow_handler_t handler)
{
	raw_spin_lock_init(&gc->lock);
	gc->num_ct = num_ct;
	gc->irq_base = irq_base;
	gc->reg_base = reg_base;
	gc->chip_types->chip.name = name;
	gc->chip_types->handler = handler;
}

/**
 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
 * @name: Name of the irq chip
 * @num_ct: Number of irq_chip_type instances associated with this chip
 * @irq_base: Interrupt base nr for this chip
 * @reg_base: Register base address (virtual)
 * @handler: Default flow handler associated with this chip
 *
 * Returns an initialized irq_chip_generic structure. The chip defaults
 * to the primary (index 0) irq_chip_type and @handler
 */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
		       void __iomem *reg_base, irq_flow_handler_t handler)
{
	struct irq_chip_generic *gc;

	gc = kzalloc(struct_size(gc, chip_types, num_ct), GFP_KERNEL);
	if (gc) {
		irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
				      handler);
	}
	return gc;
}
EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);
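
/*
 * Usage sketch (illustrative, names and register offsets are made up):
 * allocate a generic chip for 16 linearly mapped interrupts with a
 * single mask register, then register it with irq_setup_generic_chip().
 *
 *	struct irq_chip_generic *gc;
 *	struct irq_chip_type *ct;
 *
 *	gc = irq_alloc_generic_chip("foo-intc", 1, irq_base, reg_base,
 *				    handle_level_irq);
 *	if (!gc)
 *		return -ENOMEM;
 *
 *	ct = gc->chip_types;
 *	ct->regs.mask		= FOO_INTR_MASK;
 *	ct->regs.ack		= FOO_INTR_ACK;
 *	ct->chip.irq_mask	= irq_gc_mask_set_bit;
 *	ct->chip.irq_unmask	= irq_gc_mask_clr_bit;
 *	ct->chip.irq_ack	= irq_gc_ack_set_bit;
 *
 *	irq_setup_generic_chip(gc, IRQ_MSK(16), IRQ_GC_INIT_MASK_CACHE,
 *			       IRQ_NOREQUEST, 0);
 */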

static void
irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
{
	struct irq_chip_type *ct = gc->chip_types;
	u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
	int i;

	for (i = 0; i < gc->num_ct; i++) {
		if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
			mskptr = &ct[i].mask_cache_priv;
			mskreg = ct[i].regs.mask;
		}
		ct[i].mask_cache = mskptr;
		if (flags & IRQ_GC_INIT_MASK_CACHE)
			*mskptr = irq_reg_readl(gc, mskreg);
	}
}

/**
 * __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
 * @d: irq domain for which to allocate chips
 * @irqs_per_chip: Number of interrupts each chip handles (max 32)
 * @num_ct: Number of irq_chip_type instances associated with each chip
 * @name: Name of the irq chip
 * @handler: Default flow handler associated with these chips
 * @clr: IRQ_* bits to clear in the mapping function
 * @set: IRQ_* bits to set in the mapping function
 * @gcflags: Generic chip specific setup flags
 */
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
				     int num_ct, const char *name,
				     irq_flow_handler_t handler,
				     unsigned int clr, unsigned int set,
				     enum irq_gc_flags gcflags)
{
	struct irq_domain_chip_generic *dgc;
	struct irq_chip_generic *gc;
	unsigned long flags;
	int numchips, i;
	size_t dgc_sz;
	size_t gc_sz;
	size_t sz;
	void *tmp;

	if (d->gc)
		return -EBUSY;

	numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
	if (!numchips)
		return -EINVAL;

	/* Allocate a pointer, generic chip and chiptypes for each chip */
	gc_sz = struct_size(gc, chip_types, num_ct);
	dgc_sz = struct_size(dgc, gc, numchips);
	sz = dgc_sz + numchips * gc_sz;

	tmp = dgc = kzalloc(sz, GFP_KERNEL);
	if (!dgc)
		return -ENOMEM;
	dgc->irqs_per_chip = irqs_per_chip;
	dgc->num_chips = numchips;
	dgc->irq_flags_to_set = set;
	dgc->irq_flags_to_clear = clr;
	dgc->gc_flags = gcflags;
	d->gc = dgc;

	/* Calc pointer to the first generic chip */
	tmp += dgc_sz;
	for (i = 0; i < numchips; i++) {
		/* Store the pointer to the generic chip */
		dgc->gc[i] = gc = tmp;
		irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
				      NULL, handler);

		gc->domain = d;
		if (gcflags & IRQ_GC_BE_IO) {
			gc->reg_readl = &irq_readl_be;
			gc->reg_writel = &irq_writel_be;
		}

		raw_spin_lock_irqsave(&gc_lock, flags);
		list_add_tail(&gc->list, &gc_list);
		raw_spin_unlock_irqrestore(&gc_lock, flags);
		/* Calc pointer to the next generic chip */
		tmp += gc_sz;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);
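
/*
 * Usage sketch (illustrative, assuming a hypothetical 64-input controller):
 * allocate two generic chips of 32 interrupts each for an existing linear
 * domain, then fill in the per-chip register setup via
 * irq_get_domain_generic_chip(). FOO_* names are placeholders.
 *
 *	struct irq_chip_generic *gc;
 *	int ret, hwirq;
 *
 *	ret = __irq_alloc_domain_generic_chips(domain, 32, 1, "foo-intc",
 *					       handle_level_irq,
 *					       IRQ_NOREQUEST | IRQ_NOPROBE, 0,
 *					       IRQ_GC_INIT_MASK_CACHE);
 *	if (ret)
 *		return ret;
 *
 *	for (hwirq = 0; hwirq < 64; hwirq += 32) {
 *		gc = irq_get_domain_generic_chip(domain, hwirq);
 *		gc->reg_base = reg_base + (hwirq / 32) * FOO_BANK_STRIDE;
 *		gc->chip_types[0].regs.mask = FOO_INTR_MASK;
 *		gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
 *		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
 *	}
 */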

static struct irq_chip_generic *
__irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
	struct irq_domain_chip_generic *dgc = d->gc;
	int idx;

	if (!dgc)
		return ERR_PTR(-ENODEV);
	idx = hw_irq / dgc->irqs_per_chip;
	if (idx >= dgc->num_chips)
		return ERR_PTR(-EINVAL);
	return dgc->gc[idx];
}

/**
 * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
 * @d: irq domain pointer
 * @hw_irq: Hardware interrupt number
 */
struct irq_chip_generic *
irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
	struct irq_chip_generic *gc = __irq_get_domain_generic_chip(d, hw_irq);

	return !IS_ERR(gc) ? gc : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);

/*
 * Separate lockdep classes for interrupt chip which can nest irq_desc
 * lock and request mutex.
 */
static struct lock_class_key irq_nested_lock_class;
static struct lock_class_key irq_nested_request_class;

/*
 * irq_map_generic_chip - Map a generic chip for an irq domain
 */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hw_irq)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);
	struct irq_domain_chip_generic *dgc = d->gc;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	struct irq_chip *chip;
	unsigned long flags;
	int idx;

	gc = __irq_get_domain_generic_chip(d, hw_irq);
	if (IS_ERR(gc))
		return PTR_ERR(gc);

	idx = hw_irq % dgc->irqs_per_chip;

	if (test_bit(idx, &gc->unused))
		return -ENOTSUPP;

	if (test_bit(idx, &gc->installed))
		return -EBUSY;

	ct = gc->chip_types;
	chip = &ct->chip;

	/* We only init the cache for the first mapping of a generic chip */
	if (!gc->installed) {
		raw_spin_lock_irqsave(&gc->lock, flags);
		irq_gc_init_mask_cache(gc, dgc->gc_flags);
		raw_spin_unlock_irqrestore(&gc->lock, flags);
	}

	/* Mark the interrupt as installed */
	set_bit(idx, &gc->installed);

	if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
		irq_set_lockdep_class(virq, &irq_nested_lock_class,
				      &irq_nested_request_class);

	if (chip->irq_calc_mask)
		chip->irq_calc_mask(data);
	else
		data->mask = 1 << idx;

	irq_domain_set_info(d, virq, hw_irq, chip, gc, ct->handler, NULL, NULL);
	irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
	return 0;
}

static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);
	struct irq_domain_chip_generic *dgc = d->gc;
	unsigned int hw_irq = data->hwirq;
	struct irq_chip_generic *gc;
	int irq_idx;

	gc = irq_get_domain_generic_chip(d, hw_irq);
	if (!gc)
		return;

	irq_idx = hw_irq % dgc->irqs_per_chip;

	clear_bit(irq_idx, &gc->installed);
	irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
			    NULL);
}

const struct irq_domain_ops irq_generic_chip_ops = {
	.map = irq_map_generic_chip,
	.unmap = irq_unmap_generic_chip,
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
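
/*
 * Usage sketch (illustrative): a driver that wants the generic chip to be
 * mapped automatically can create its domain with irq_generic_chip_ops
 * before allocating the domain generic chips. "np" is the controller's
 * device tree node in this hypothetical example.
 *
 *	domain = irq_domain_add_linear(np, 64, &irq_generic_chip_ops, NULL);
 *	if (!domain)
 *		return -ENOMEM;
 */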

/**
 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
 * @flags: Flags for initialization
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Set up max. 32 interrupts starting from gc->irq_base. Note, this
 * initializes all interrupts to the primary irq_chip_type and its
 * associated handler.
 */
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
			    enum irq_gc_flags flags, unsigned int clr,
			    unsigned int set)
{
	struct irq_chip_type *ct = gc->chip_types;
	struct irq_chip *chip = &ct->chip;
	unsigned int i;

	raw_spin_lock(&gc_lock);
	list_add_tail(&gc->list, &gc_list);
	raw_spin_unlock(&gc_lock);

	irq_gc_init_mask_cache(gc, flags);

	for (i = gc->irq_base; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		if (flags & IRQ_GC_INIT_NESTED_LOCK)
			irq_set_lockdep_class(i, &irq_nested_lock_class,
					      &irq_nested_request_class);

		if (!(flags & IRQ_GC_NO_MASK)) {
			struct irq_data *d = irq_get_irq_data(i);

			if (chip->irq_calc_mask)
				chip->irq_calc_mask(d);
			else
				d->mask = 1 << (i - gc->irq_base);
		}
		irq_set_chip_and_handler(i, chip, ct->handler);
		irq_set_chip_data(i, gc);
		irq_modify_status(i, clr, set);
	}
	gc->irq_cnt = i - gc->irq_base;
}
EXPORT_SYMBOL_GPL(irq_setup_generic_chip);

/**
 * irq_setup_alt_chip - Switch to alternative chip
 * @d: irq_data for this interrupt
 * @type: Flow type to be initialized
 *
 * Only to be called from chip->irq_set_type() callbacks.
 */
int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = gc->chip_types;
	unsigned int i;

	for (i = 0; i < gc->num_ct; i++, ct++) {
		if (ct->type & type) {
			d->chip = &ct->chip;
			irq_data_to_desc(d)->handle_irq = ct->handler;
			return 0;
		}
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
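
/*
 * Usage sketch (illustrative): a chip with two irq_chip_types, one for
 * level and one for edge handling, can switch between them from its
 * irq_set_type() callback. The foo_set_type() name is hypothetical.
 *
 *	gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
 *	gc->chip_types[0].handler = handle_level_irq;
 *	gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
 *	gc->chip_types[1].handler = handle_edge_irq;
 *
 *	static int foo_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		// program the hardware trigger mode here, then:
 *		return irq_setup_alt_chip(d, type);
 *	}
 */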

/**
 * irq_remove_generic_chip - Remove a chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to remove relative to gc->irq_base
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Remove up to 32 interrupts starting from gc->irq_base.
 */
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
			     unsigned int clr, unsigned int set)
{
	unsigned int i = gc->irq_base;

	raw_spin_lock(&gc_lock);
	list_del(&gc->list);
	raw_spin_unlock(&gc_lock);

	for (; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		/* Remove handler first. That will mask the irq line */
		irq_set_handler(i, NULL);
		irq_set_chip(i, &no_irq_chip);
		irq_set_chip_data(i, NULL);
		irq_modify_status(i, clr, set);
	}
}
EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
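
/*
 * Usage sketch (illustrative): undo a previous irq_setup_generic_chip()
 * for the same 16 interrupts before freeing the generic chip. The gc was
 * obtained from irq_alloc_generic_chip() in this hypothetical example.
 *
 *	irq_remove_generic_chip(gc, IRQ_MSK(16), IRQ_NOREQUEST, 0);
 *	kfree(gc);
 */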

static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
{
	unsigned int virq;

	if (!gc->domain)
		return irq_get_irq_data(gc->irq_base);

	/*
	 * We don't know which of the irqs has been actually
	 * installed. Use the first one.
	 */
	if (!gc->installed)
		return NULL;

	virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
	return virq ? irq_get_irq_data(virq) : NULL;
}

#ifdef CONFIG_PM
static int irq_gc_suspend(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_suspend) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_suspend(data);
		}

		if (gc->suspend)
			gc->suspend(gc);
	}
	return 0;
}

static void irq_gc_resume(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (gc->resume)
			gc->resume(gc);

		if (ct->chip.irq_resume) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_resume(data);
		}
	}
}
#else
#define irq_gc_suspend NULL
#define irq_gc_resume NULL
#endif

static void irq_gc_shutdown(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_pm_shutdown) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_pm_shutdown(data);
		}
	}
}

static struct syscore_ops irq_gc_syscore_ops = {
	.suspend = irq_gc_suspend,
	.resume = irq_gc_resume,
	.shutdown = irq_gc_shutdown,
};

static int __init irq_gc_init_ops(void)
{
	register_syscore_ops(&irq_gc_syscore_ops);
	return 0;
}
device_initcall(irq_gc_init_ops);