// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/delay.h>

#include <drm/drm_vblank.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"

static int mdp4_hw_init(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_device *dev = mdp4_kms->dev;
	u32 dmap_cfg, vg_cfg;
	unsigned long clk;

	pm_runtime_get_sync(dev->dev);

	if (mdp4_kms->rev > 1) {
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
	}

	mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);

	/* max read pending cmd config, 3 pending requests: */
	mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);

	clk = clk_get_rate(mdp4_kms->clk);

	if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
		dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
		vg_cfg = 0x47;       /* 16 bytes-burst x 8 req */
	} else {
		dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
		vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
	}

	DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);

	if (mdp4_kms->rev >= 2)
		mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);

	/* disable CSC matrix / YUV by default: */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);

	if (mdp4_kms->rev > 1)
		mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

	pm_runtime_put_sync(dev->dev);

	return 0;
}

static void mdp4_enable_commit(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_enable(mdp4_kms);
}

static void mdp4_disable_commit(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_disable(mdp4_kms);
}

static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
}

static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	/* TODO */
}

static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

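	/* block until each affected CRTC has finished its in-progress flush */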
	for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
		mdp4_crtc_wait_for_commit_done(crtc);
}

static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
}

static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	/* if we had >1 encoder, we'd need something more clever: */
	switch (encoder->encoder_type) {
	case DRM_MODE_ENCODER_TMDS:
		return mdp4_dtv_round_pixclk(encoder, rate);
	case DRM_MODE_ENCODER_LVDS:
	case DRM_MODE_ENCODER_DSI:
	default:
		return rate;
	}
}

static void mdp4_destroy(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct device *dev = mdp4_kms->dev->dev;
	struct msm_gem_address_space *aspace = kms->aspace;

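	/* release the blank-cursor scanout bo allocated in mdp4_kms_init(): */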
	if (mdp4_kms->blank_cursor_iova)
		msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
	drm_gem_object_put(mdp4_kms->blank_cursor_bo);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu);
		msm_gem_address_space_put(aspace);
	}

	if (mdp4_kms->rpm_enabled)
		pm_runtime_disable(dev);

	mdp_kms_destroy(&mdp4_kms->base);

	kfree(mdp4_kms);
}

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp4_hw_init,
		.irq_preinstall  = mdp4_irq_preinstall,
		.irq_postinstall = mdp4_irq_postinstall,
		.irq_uninstall   = mdp4_irq_uninstall,
		.irq             = mdp4_irq,
		.enable_vblank   = mdp4_enable_vblank,
		.disable_vblank  = mdp4_disable_vblank,
		.enable_commit   = mdp4_enable_commit,
		.disable_commit  = mdp4_disable_commit,
		.prepare_commit  = mdp4_prepare_commit,
		.flush_commit    = mdp4_flush_commit,
		.wait_flush      = mdp4_wait_flush,
		.complete_commit = mdp4_complete_commit,
		.get_format      = mdp_get_format,
		.round_pixclk    = mdp4_round_pixclk,
		.destroy         = mdp4_destroy,
	},
	.set_irqmask         = mdp4_set_irqmask,
};

int mdp4_disable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

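	/* the clk API treats NULL clocks as no-ops, so the optional
	 * pclk/lut_clk need no special-casing here:
	 */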
	clk_disable_unprepare(mdp4_kms->clk);
	clk_disable_unprepare(mdp4_kms->pclk);
	clk_disable_unprepare(mdp4_kms->lut_clk);
	clk_disable_unprepare(mdp4_kms->axi_clk);

	return 0;
}

int mdp4_enable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_prepare_enable(mdp4_kms->clk);
	clk_prepare_enable(mdp4_kms->pclk);
	clk_prepare_enable(mdp4_kms->lut_clk);
	clk_prepare_enable(mdp4_kms->axi_clk);

	return 0;
}


static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
				  int intf_type)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct device_node *panel_node;
	int dsi_id;
	int ret;

	switch (intf_type) {
	case DRM_MODE_ENCODER_LVDS:
		/*
		 * bail out early if there is no panel node (no need to
		 * initialize LCDC encoder and LVDS connector)
		 */
		panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
		if (!panel_node)
			return 0;

		encoder = mdp4_lcdc_encoder_init(dev, panel_node);
		if (IS_ERR(encoder)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
			of_node_put(panel_node);
			return PTR_ERR(encoder);
		}

		/* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
		encoder->possible_crtcs = 1 << DMA_P;

		connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
		if (IS_ERR(connector)) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
			of_node_put(panel_node);
			return PTR_ERR(connector);
		}

		break;
	case DRM_MODE_ENCODER_TMDS:
		encoder = mdp4_dtv_encoder_init(dev);
		if (IS_ERR(encoder)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
			return PTR_ERR(encoder);
		}

		/*
		 * DTV can be hooked to DMA_E; the DMA_E CRTC is registered
		 * second in modeset_init(), hence bit 1:
		 */
		encoder->possible_crtcs = 1 << 1;

		if (priv->hdmi) {
			/* Construct bridge/connector for HDMI: */
			ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
			if (ret) {
				DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
				return ret;
			}
		}

		break;
	case DRM_MODE_ENCODER_DSI:
		/* only DSI1 supported for now */
		dsi_id = 0;

		if (!priv->dsi[dsi_id])
			break;

		encoder = mdp4_dsi_encoder_init(dev);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			DRM_DEV_ERROR(dev->dev,
				"failed to construct DSI encoder: %d\n", ret);
			return ret;
		}

		/* TODO: Add DMA_S later? */
		encoder->possible_crtcs = 1 << DMA_P;

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
				ret);
			return ret;
		}

		break;
	default:
		DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
		return -EINVAL;
	}

	return 0;
}

static int modeset_init(struct mdp4_kms *mdp4_kms)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;
	static const enum mdp4_pipe rgb_planes[] = {
		RGB1, RGB2,
	};
	static const enum mdp4_pipe vg_planes[] = {
		VG1, VG2,
	};
	static const enum mdp4_dma mdp4_crtcs[] = {
		DMA_P, DMA_E,
	};
	static const char * const mdp4_crtc_names[] = {
		"DMA_P", "DMA_E",
	};
	static const int mdp4_intfs[] = {
		DRM_MODE_ENCODER_LVDS,
		DRM_MODE_ENCODER_DSI,
		DRM_MODE_ENCODER_TMDS,
	};

	/* construct non-private planes: */
	for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
		plane = mdp4_plane_init(dev, vg_planes[i], false);
		if (IS_ERR(plane)) {
			DRM_DEV_ERROR(dev->dev,
				"failed to construct plane for VG%d\n", i + 1);
			ret = PTR_ERR(plane);
			goto fail;
		}
	}

	for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
		plane = mdp4_plane_init(dev, rgb_planes[i], true);
		if (IS_ERR(plane)) {
			DRM_DEV_ERROR(dev->dev,
				"failed to construct plane for RGB%d\n", i + 1);
			ret = PTR_ERR(plane);
			goto fail;
		}

		crtc  = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
				mdp4_crtcs[i]);
		if (IS_ERR(crtc)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
				mdp4_crtc_names[i]);
			ret = PTR_ERR(crtc);
			goto fail;
		}

		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/*
	 * we currently set up two relatively fixed paths:
	 *
	 * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
	 *			or
	 * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
	 *
	 * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
	 */

	for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
		ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
				i, ret);
			goto fail;
		}
	}

	return 0;

fail:
	return ret;
}

static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
				 u32 *major, u32 *minor)
{
	struct drm_device *dev = mdp4_kms->dev;
	u32 version;

	mdp4_enable(mdp4_kms);
	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
	mdp4_disable(mdp4_kms);

	*major = FIELD(version, MDP4_VERSION_MAJOR);
	*minor = FIELD(version, MDP4_VERSION_MINOR);

	DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor);
}

static int mdp4_kms_init(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp4_kms *mdp4_kms;
	struct msm_kms *kms = NULL;
	struct iommu_domain *iommu;
	struct msm_gem_address_space *aspace;
	int irq, ret;
	u32 major, minor;
	unsigned long max_clk;

	/* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
	max_clk = 266667000;

	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
	if (!mdp4_kms) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
		return -ENOMEM;
	}

	ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to init kms\n");
		goto fail;
	}

	priv->kms = &mdp4_kms->base.base;
	kms = priv->kms;

	mdp4_kms->dev = dev;

	mdp4_kms->mmio = msm_ioremap(pdev, NULL);
	if (IS_ERR(mdp4_kms->mmio)) {
		ret = PTR_ERR(mdp4_kms->mmio);
		goto fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	kms->irq = irq;

	/* NOTE: driver for this regulator still missing upstream.. use
	 * _get_exclusive() and ignore the error if it does not exist
	 * (and hope that the bootloader left it on for us)
	 */
	mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
	if (IS_ERR(mdp4_kms->vdd))
		mdp4_kms->vdd = NULL;

	if (mdp4_kms->vdd) {
		ret = regulator_enable(mdp4_kms->vdd);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
			goto fail;
		}
	}

	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(mdp4_kms->clk)) {
		DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdp4_kms->clk);
		goto fail;
	}

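	/* iface_clk is treated as optional; continue with a NULL clock if missing: */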
	mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(mdp4_kms->pclk))
		mdp4_kms->pclk = NULL;

	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(mdp4_kms->axi_clk)) {
		DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
		ret = PTR_ERR(mdp4_kms->axi_clk);
		goto fail;
	}

	clk_set_rate(mdp4_kms->clk, max_clk);

	read_mdp_hw_revision(mdp4_kms, &major, &minor);

	if (major != 4) {
		DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
			      major, minor);
		ret = -ENXIO;
		goto fail;
	}

	mdp4_kms->rev = minor;

	if (mdp4_kms->rev >= 2) {
		mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
		if (IS_ERR(mdp4_kms->lut_clk)) {
			DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
			ret = PTR_ERR(mdp4_kms->lut_clk);
			goto fail;
		}
		clk_set_rate(mdp4_kms->lut_clk, max_clk);
	}

	pm_runtime_enable(dev->dev);
	mdp4_kms->rpm_enabled = true;

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp4_enable(mdp4_kms);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
	mdp4_disable(mdp4_kms);
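	/* ~16 ms is roughly one frame at 60 Hz; give the disabled interfaces time to settle */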
	mdelay(16);

	iommu = iommu_domain_alloc(pdev->dev.bus);
	if (iommu) {
		struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);

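		/* IOVA space spans everything above the first page, up to 4 GiB: */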
		aspace  = msm_gem_address_space_create(mmu,
			"mdp4", 0x1000, 0x100000000 - 0x1000);

		if (IS_ERR(aspace)) {
			if (!IS_ERR(mmu))
				mmu->funcs->destroy(mmu);
			ret = PTR_ERR(aspace);
			goto fail;
		}

		kms->aspace = aspace;
	} else {
		DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		aspace = NULL;
	}

	ret = modeset_init(mdp4_kms);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
		DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
		mdp4_kms->blank_cursor_bo = NULL;
		goto fail;
	}

	ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
			&mdp4_kms->blank_cursor_iova);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;

	return 0;

fail:
	if (kms)
		mdp4_destroy(kms);

	return ret;
}

static const struct dev_pm_ops mdp4_pm_ops = {
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};

static int mdp4_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, mdp4_kms_init);
}

static int mdp4_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

static const struct of_device_id mdp4_dt_match[] = {
	{ .compatible = "qcom,mdp4" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mdp4_dt_match);

static struct platform_driver mdp4_platform_driver = {
	.probe      = mdp4_probe,
	.remove     = mdp4_remove,
	.shutdown   = msm_drv_shutdown,
	.driver     = {
		.name   = "mdp4",
		.of_match_table = mdp4_dt_match,
		.pm     = &mdp4_pm_ops,
	},
};

void __init msm_mdp4_register(void)
{
	platform_driver_register(&mdp4_platform_driver);
}

void __exit msm_mdp4_unregister(void)
{
	platform_driver_unregister(&mdp4_platform_driver);
}