// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"

/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_SSPP_TOP0_OFF		0x0
#define MDP_INTF_0_OFF			0x6A000
#define MDP_INTF_1_OFF			0x6A800
#define MDP_INTF_2_OFF			0x6B000
#define MDP_INTF_3_OFF			0x6B800
#define MDP_INTF_4_OFF			0x6C000
#define MDP_INTF_5_OFF			0x6C800
#define MDP_AD4_0_OFF			0x7C000
#define MDP_AD4_1_OFF			0x7D000
#define MDP_AD4_INTR_EN_OFF		0x41c
#define MDP_AD4_INTR_CLEAR_OFF		0x424
#define MDP_AD4_INTR_STATUS_OFF		0x420
#define MDP_INTF_0_OFF_REV_7xxx             0x34000
#define MDP_INTF_1_OFF_REV_7xxx             0x35000
#define MDP_INTF_2_OFF_REV_7xxx             0x36000
#define MDP_INTF_3_OFF_REV_7xxx             0x37000
#define MDP_INTF_4_OFF_REV_7xxx             0x38000
#define MDP_INTF_5_OFF_REV_7xxx             0x39000

/**
 * struct dpu_intr_reg - offsets of one DPU interrupt register set
 * @clr_off:	offset to CLEAR reg
 * @en_off:	offset to ENABLE reg
 * @status_off:	offset to STATUS reg
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * dpu_intr_set - list of DPU interrupt registers
 *
 * When making changes be sure to sync with dpu_hw_intr_reg
 */
static const struct dpu_intr_reg dpu_intr_set[] = {
	[MDP_SSPP_TOP0_INTR] = {
		MDP_SSPP_TOP0_OFF+INTR_CLEAR,
		MDP_SSPP_TOP0_OFF+INTR_EN,
		MDP_SSPP_TOP0_OFF+INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
		MDP_SSPP_TOP0_OFF+INTR2_EN,
		MDP_SSPP_TOP0_OFF+INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
		MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
		MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_0_OFF+INTF_INTR_CLEAR,
		MDP_INTF_0_OFF+INTF_INTR_EN,
		MDP_INTF_0_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_1_OFF+INTF_INTR_CLEAR,
		MDP_INTF_1_OFF+INTF_INTR_EN,
		MDP_INTF_1_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_2_OFF+INTF_INTR_CLEAR,
		MDP_INTF_2_OFF+INTF_INTR_EN,
		MDP_INTF_2_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_3_OFF+INTF_INTR_CLEAR,
		MDP_INTF_3_OFF+INTF_INTR_EN,
		MDP_INTF_3_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
		MDP_INTF_4_OFF+INTF_INTR_EN,
		MDP_INTF_4_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_5_OFF+INTF_INTR_CLEAR,
		MDP_INTF_5_OFF+INTF_INTR_EN,
		MDP_INTF_5_OFF+INTF_INTR_STATUS
	},
	[MDP_AD4_0_INTR] = {
		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	[MDP_AD4_1_INTR] = {
		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	[MDP_INTF0_7xxx_INTR] = {
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF1_7xxx_INTR] = {
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF2_7xxx_INTR] = {
		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF3_7xxx_INTR] = {
		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF4_7xxx_INTR] = {
		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF5_7xxx_INTR] = {
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
	},
};

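/*
 * Each interrupt register exposes 32 status bits, so a flat irq_idx is
 * encoded as (register index * 32) + bit position; the helpers below
 * recover the register index and the per-register bit mask.
 */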
#define DPU_IRQ_REG(irq_idx)	(irq_idx / 32)
#define DPU_IRQ_MASK(irq_idx)	(BIT(irq_idx % 32))

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @dpu_kms:		Pointer to DPU's KMS structure
 * @irq_idx:		interrupt index
 */
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
	VERB("irq_idx=%d\n", irq_idx);

	if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb) {
		DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
		return;
	}

	atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);

	/*
	 * Perform registered function callback
	 */
	dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
}

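/*
 * dpu_core_irq - top-level MDSS interrupt handler: walk every interrupt
 * register present on this hardware, latch and clear the pending status,
 * and dispatch each asserted, enabled bit to its registered callback.
 */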
irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return IRQ_NONE;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* Read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				     irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Search through matching intr status.
		 */
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

			dpu_core_irq_callback_handler(dpu_kms, irq_idx);

			/*
			 * When the callback finishes, clear the irq_status
			 * with the matching mask. Once irq_status is all
			 * cleared, the search can be stopped.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return IRQ_HANDLED;
}

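/*
 * Enable a single interrupt bit within its register. The caller must hold
 * intr->irq_lock; the read-modify-write of cache_irq_mask and the register
 * update are not safe otherwise.
 */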
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock and it's the caller's responsibility to ensure that's
	 * held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "DPU IRQ already set:";
	} else {
		dbgstr = "DPU IRQ enabled:";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Cleaning any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enabling interrupts with the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

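/*
 * Disable a single interrupt bit within its register. As with the enable
 * path, the caller must hold intr->irq_lock.
 */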
static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock and it's the caller's responsibility to ensure that's
	 * held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "DPU IRQ is already cleared:";
	} else {
		dbgstr = "DPU IRQ mask disable:";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Cleaning any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

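/* Clear any latched interrupt status in every register present on this SoC. */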
static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].clr_off, 0xffffffff);
	}

	/* ensure register writes go through */
	wmb();
}

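/* Mask off every interrupt source in every register present on this SoC. */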
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].en_off, 0x00000000);
	}

	/* ensure register writes go through */
	wmb();
}

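/*
 * dpu_core_irq_read - return the raw status of a single interrupt bit and
 * clear it if it is pending, regardless of whether it is currently enabled.
 */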
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
				__builtin_return_address(0), irq_idx);
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	reg_idx = DPU_IRQ_REG(irq_idx);
	intr_status = DPU_REG_READ(&intr->hw,
			dpu_intr_set[reg_idx].status_off) &
		DPU_IRQ_MASK(irq_idx);
	if (intr_status)
		DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}

static void __intr_offset(const struct dpu_mdss_cfg *m,
		void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
{
	hw->blk_addr = addr + m->mdp[0].base;
}

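/*
 * dpu_hw_intr_init - allocate the interrupt table and set up the register
 * map at the MDP base. The table is sized for MDP_INTR_MAX registers of
 * 32 bits each, so every possible interrupt bit has a slot.
 */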
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_intr *intr;
	int nirq = MDP_INTR_MAX * 32;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
	if (!intr)
		return ERR_PTR(-ENOMEM);

	__intr_offset(m, addr, &intr->hw);

	intr->total_irqs = nirq;

	intr->irq_mask = m->mdss_irqs;

	spin_lock_init(&intr->irq_lock);

	return intr;
}

void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
	kfree(intr);
}

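/*
 * dpu_core_irq_register_callback - attach a callback to an interrupt index
 * and enable that interrupt. Only one callback may be registered per index;
 * a second registration fails with -EBUSY.
 */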
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
		void (*irq_cb)(void *arg, int irq_idx),
		void *irq_arg)
{
	unsigned long irq_flags;
	int ret;

	if (!irq_cb) {
		DPU_ERROR("invalid irq_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);

	if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		return -EBUSY;
	}

	trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
	dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
	dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;

	ret = dpu_hw_intr_enable_irq_locked(
				dpu_kms->hw_intr,
				irq_idx);
	if (ret)
		DPU_ERROR("Failed to enable IRQ for irq_idx:%d\n",
					irq_idx);
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_register_success(irq_idx);

	return 0;
}

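/*
 * dpu_core_irq_unregister_callback - disable an interrupt index and detach
 * its callback.
 */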
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
	unsigned long irq_flags;
	int ret;

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(irq_idx);

	ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
	if (ret)
		DPU_ERROR("Failed to disable IRQ for irq_idx:%d: %d\n",
					irq_idx, ret);

	dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
	dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;

	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_unregister_success(irq_idx);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
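/* Dump fire counts and registered callbacks for every interrupt index. */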
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_kms *dpu_kms = s->private;
	unsigned long irq_flags;
	int i, irq_count;
	void *cb;

	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
		irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
		cb = dpu_kms->hw_intr->irq_tbl[i].cb;
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		if (irq_count || cb)
			seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
		&dpu_debugfs_core_irq_fops);
}
#endif

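/*
 * dpu_core_irq_preinstall - bring the hardware to a known state before the
 * IRQ handler is installed: clear any latched status, mask every source and
 * zero the per-interrupt fire counters.
 */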
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
		atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
}

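/*
 * dpu_core_irq_uninstall - complain about any callback still registered,
 * then clear and mask all interrupt sources.
 */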
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	if (!dpu_kms->hw_intr)
		return;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
		if (dpu_kms->hw_intr->irq_tbl[i].cb)
			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);

	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}