// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Cadence MIPI-CSI2 RX Controller v1.3
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

#define CSI2RX_DEVICE_CFG_REG			0x000

#define CSI2RX_SOFT_RESET_REG			0x004
#define CSI2RX_SOFT_RESET_PROTOCOL			BIT(1)
#define CSI2RX_SOFT_RESET_FRONT				BIT(0)

#define CSI2RX_STATIC_CFG_REG			0x008
#define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane)	((plane) << (16 + (llane) * 4))
#define CSI2RX_STATIC_CFG_LANES_MASK			GENMASK(11, 8)

#define CSI2RX_STREAM_BASE(n)		(((n) + 1) * 0x100)

#define CSI2RX_STREAM_CTRL_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x000)
#define CSI2RX_STREAM_CTRL_START			BIT(0)

#define CSI2RX_STREAM_DATA_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x008)
#define CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT		BIT(31)
#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n)		BIT((n) + 16)

#define CSI2RX_STREAM_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x00c)
#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF		(1 << 8)

#define CSI2RX_LANES_MAX	4
#define CSI2RX_STREAMS_MAX	4

enum csi2rx_pads {
	CSI2RX_PAD_SINK,
	CSI2RX_PAD_SOURCE_STREAM0,
	CSI2RX_PAD_SOURCE_STREAM1,
	CSI2RX_PAD_SOURCE_STREAM2,
	CSI2RX_PAD_SOURCE_STREAM3,
	CSI2RX_PAD_MAX,
};

struct csi2rx_priv {
	struct device			*dev;
	unsigned int			count;

	/*
	 * Used to prevent race conditions between multiple,
	 * concurrent calls to start and stop.
	 */
	struct mutex			lock;

	void __iomem			*base;
	struct clk			*sys_clk;
	struct clk			*p_clk;
	struct clk			*pixel_clk[CSI2RX_STREAMS_MAX];
	struct phy			*dphy;

	u8				lanes[CSI2RX_LANES_MAX];
	u8				num_lanes;
	u8				max_lanes;
	u8				max_streams;
	bool				has_internal_dphy;

	struct v4l2_subdev		subdev;
	struct v4l2_async_notifier	notifier;
	struct media_pad		pads[CSI2RX_PAD_MAX];

	/* Remote source */
	struct v4l2_subdev		*source_subdev;
	int				source_pad;
};

static inline
struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct csi2rx_priv, subdev);
}

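/* Assert the protocol and front-end soft resets, then release them. */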
static void csi2rx_reset(struct csi2rx_priv *csi2rx)
{
	writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT,
	       csi2rx->base + CSI2RX_SOFT_RESET_REG);

	udelay(10);

	writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG);
}

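/*
 * Bring the controller up: reset the core, program the data-lane
 * mapping, start the remote source, then start every output stream
 * with its static virtual-channel mapping.
 */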
static int csi2rx_start(struct csi2rx_priv *csi2rx)
{
	unsigned int i;
	unsigned long lanes_used = 0;
	u32 reg;
	int ret;

	ret = clk_prepare_enable(csi2rx->p_clk);
	if (ret)
		return ret;

	csi2rx_reset(csi2rx);

	reg = csi2rx->num_lanes << 8;
	for (i = 0; i < csi2rx->num_lanes; i++) {
		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, csi2rx->lanes[i]);
		set_bit(csi2rx->lanes[i], &lanes_used);
	}

	/*
	 * Even the unused lanes need to be mapped. To avoid mapping
	 * the same physical lane twice, keep track of the lanes used
	 * in the previous loop, and only map unused physical lanes to
	 * the rest of our logical lanes.
	 */
	for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
		unsigned int idx = find_first_zero_bit(&lanes_used,
						       csi2rx->max_lanes);
		set_bit(idx, &lanes_used);
		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
	}

	writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);

	ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
	if (ret)
		goto err_disable_pclk;

	/*
	 * Create a static mapping between the CSI virtual channels
	 * and the output streams.
	 *
	 * This should be enhanced, but v4l2 lacks support for
	 * changing that mapping dynamically.
	 *
	 * We also cannot enable and disable independent streams here,
	 * hence the reference counting.
	 */
	for (i = 0; i < csi2rx->max_streams; i++) {
		ret = clk_prepare_enable(csi2rx->pixel_clk[i]);
		if (ret)
			goto err_disable_pixclk;

		writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
		       csi2rx->base + CSI2RX_STREAM_CFG_REG(i));

		writel(CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT |
		       CSI2RX_STREAM_DATA_CFG_VC_SELECT(i),
		       csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i));

		writel(CSI2RX_STREAM_CTRL_START,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
	}

	ret = clk_prepare_enable(csi2rx->sys_clk);
	if (ret)
		goto err_disable_pixclk;

	clk_disable_unprepare(csi2rx->p_clk);

	return 0;

err_disable_pixclk:
	for (; i > 0; i--)
		clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);

err_disable_pclk:
	clk_disable_unprepare(csi2rx->p_clk);

	return ret;
}

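/*
 * Counterpart of csi2rx_start(): stop every output stream, release the
 * clocks taken at start time and tell the remote source to stop streaming.
 */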
static void csi2rx_stop(struct csi2rx_priv *csi2rx)
{
	unsigned int i;

	clk_prepare_enable(csi2rx->p_clk);
	clk_disable_unprepare(csi2rx->sys_clk);

	for (i = 0; i < csi2rx->max_streams; i++) {
		writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));

		clk_disable_unprepare(csi2rx->pixel_clk[i]);
	}

	clk_disable_unprepare(csi2rx->p_clk);

	if (v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, false))
		dev_warn(csi2rx->dev, "Couldn't disable our subdev\n");
}

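/*
 * The streams cannot be started or stopped independently, so keep a
 * reference count: only the first enable starts the controller and only
 * the last disable stops it.
 */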
static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
{
	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
	int ret = 0;

	mutex_lock(&csi2rx->lock);

	if (enable) {
		/*
		 * If we're not the first user, there's no need to
		 * enable the whole controller.
		 */
		if (!csi2rx->count) {
			ret = csi2rx_start(csi2rx);
			if (ret)
				goto out;
		}

		csi2rx->count++;
	} else {
		csi2rx->count--;

		/*
		 * Let the last user turn off the lights.
		 */
		if (!csi2rx->count)
			csi2rx_stop(csi2rx);
	}

out:
	mutex_unlock(&csi2rx->lock);
	return ret;
}

static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
	.s_stream	= csi2rx_s_stream,
};

static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
	.video		= &csi2rx_video_ops,
};

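/*
 * Called when the remote source sub-device is bound: remember it and its
 * source pad, and create an immutable link to our sink pad.
 */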
static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
			      struct v4l2_subdev *s_subdev,
			      struct v4l2_async_subdev *asd)
{
	struct v4l2_subdev *subdev = notifier->sd;
	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);

	csi2rx->source_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
							 s_subdev->fwnode,
							 MEDIA_PAD_FL_SOURCE);
	if (csi2rx->source_pad < 0) {
		dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n",
			s_subdev->name);
		return csi2rx->source_pad;
	}

	csi2rx->source_subdev = s_subdev;

	dev_dbg(csi2rx->dev, "Bound %s pad: %d\n", s_subdev->name,
		csi2rx->source_pad);

	return media_create_pad_link(&csi2rx->source_subdev->entity,
				     csi2rx->source_pad,
				     &csi2rx->subdev.entity, 0,
				     MEDIA_LNK_FL_ENABLED |
				     MEDIA_LNK_FL_IMMUTABLE);
}

static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = {
	.bound		= csi2rx_async_bound,
};

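/*
 * Map the registers, grab the clocks and the (optional) D-PHY, and read
 * the device configuration register to find out how many lanes and
 * streams this instance supports.
 */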
static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
				struct platform_device *pdev)
{
	unsigned char i;
	u32 dev_cfg;
	int ret;

	csi2rx->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi2rx->base))
		return PTR_ERR(csi2rx->base);

	csi2rx->sys_clk = devm_clk_get(&pdev->dev, "sys_clk");
	if (IS_ERR(csi2rx->sys_clk)) {
		dev_err(&pdev->dev, "Couldn't get sys clock\n");
		return PTR_ERR(csi2rx->sys_clk);
	}

	csi2rx->p_clk = devm_clk_get(&pdev->dev, "p_clk");
	if (IS_ERR(csi2rx->p_clk)) {
		dev_err(&pdev->dev, "Couldn't get P clock\n");
		return PTR_ERR(csi2rx->p_clk);
	}

	csi2rx->dphy = devm_phy_optional_get(&pdev->dev, "dphy");
	if (IS_ERR(csi2rx->dphy)) {
		dev_err(&pdev->dev, "Couldn't get external D-PHY\n");
		return PTR_ERR(csi2rx->dphy);
	}

	/*
	 * FIXME: Once we have external D-PHY support, this check
	 * will need to be removed.
	 */
	if (csi2rx->dphy) {
		dev_err(&pdev->dev, "External D-PHY not supported yet\n");
		return -EINVAL;
	}

	ret = clk_prepare_enable(csi2rx->p_clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't prepare and enable P clock\n");
		return ret;
	}

	dev_cfg = readl(csi2rx->base + CSI2RX_DEVICE_CFG_REG);
	clk_disable_unprepare(csi2rx->p_clk);

	csi2rx->max_lanes = dev_cfg & 7;
	if (csi2rx->max_lanes > CSI2RX_LANES_MAX) {
		dev_err(&pdev->dev, "Invalid number of lanes: %u\n",
			csi2rx->max_lanes);
		return -EINVAL;
	}

	csi2rx->max_streams = (dev_cfg >> 4) & 7;
	if (csi2rx->max_streams > CSI2RX_STREAMS_MAX) {
		dev_err(&pdev->dev, "Invalid number of streams: %u\n",
			csi2rx->max_streams);
		return -EINVAL;
	}

	csi2rx->has_internal_dphy = dev_cfg & BIT(3) ? true : false;

	/*
	 * FIXME: Once we have internal D-PHY support, this check
	 * will need to be removed.
	 */
	if (csi2rx->has_internal_dphy) {
		dev_err(&pdev->dev, "Internal D-PHY not supported yet\n");
		return -EINVAL;
	}

	for (i = 0; i < csi2rx->max_streams; i++) {
		char clk_name[16];

		snprintf(clk_name, sizeof(clk_name), "pixel_if%u_clk", i);
		csi2rx->pixel_clk[i] = devm_clk_get(&pdev->dev, clk_name);
		if (IS_ERR(csi2rx->pixel_clk[i])) {
			dev_err(&pdev->dev, "Couldn't get clock %s\n", clk_name);
			return PTR_ERR(csi2rx->pixel_clk[i]);
		}
	}

	return 0;
}

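/*
 * Parse the sink endpoint from the device tree: retrieve the data-lane
 * configuration and register an async notifier for the remote source.
 */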
static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
{
	struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
	struct v4l2_async_subdev *asd;
	struct fwnode_handle *fwh;
	struct device_node *ep;
	int ret;

	ep = of_graph_get_endpoint_by_regs(csi2rx->dev->of_node, 0, 0);
	if (!ep)
		return -EINVAL;

	fwh = of_fwnode_handle(ep);
	ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
	if (ret) {
		dev_err(csi2rx->dev, "Could not parse v4l2 endpoint\n");
		of_node_put(ep);
		return ret;
	}

	if (v4l2_ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
		dev_err(csi2rx->dev, "Unsupported media bus type: 0x%x\n",
			v4l2_ep.bus_type);
		of_node_put(ep);
		return -EINVAL;
	}

	memcpy(csi2rx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
	       sizeof(csi2rx->lanes));
	csi2rx->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
	if (csi2rx->num_lanes > csi2rx->max_lanes) {
		dev_err(csi2rx->dev, "Unsupported number of data-lanes: %d\n",
			csi2rx->num_lanes);
		of_node_put(ep);
		return -EINVAL;
	}

	v4l2_async_nf_init(&csi2rx->notifier);

	asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
					      struct v4l2_async_subdev);
	of_node_put(ep);
	if (IS_ERR(asd))
		return PTR_ERR(asd);

	csi2rx->notifier.ops = &csi2rx_notifier_ops;

	ret = v4l2_async_subdev_nf_register(&csi2rx->subdev, &csi2rx->notifier);
	if (ret)
		v4l2_async_nf_cleanup(&csi2rx->notifier);

	return ret;
}

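/* Allocate and initialize the driver state, then register the subdev. */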
static int csi2rx_probe(struct platform_device *pdev)
{
	struct csi2rx_priv *csi2rx;
	unsigned int i;
	int ret;

	csi2rx = kzalloc(sizeof(*csi2rx), GFP_KERNEL);
	if (!csi2rx)
		return -ENOMEM;
	platform_set_drvdata(pdev, csi2rx);
	csi2rx->dev = &pdev->dev;
	mutex_init(&csi2rx->lock);

	ret = csi2rx_get_resources(csi2rx, pdev);
	if (ret)
		goto err_free_priv;

	ret = csi2rx_parse_dt(csi2rx);
	if (ret)
		goto err_free_priv;

	csi2rx->subdev.owner = THIS_MODULE;
	csi2rx->subdev.dev = &pdev->dev;
	v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops);
	v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev);
	snprintf(csi2rx->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.%s",
		 KBUILD_MODNAME, dev_name(&pdev->dev));

	/* Create our media pads */
	csi2rx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++)
		csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX,
				     csi2rx->pads);
	if (ret)
		goto err_cleanup;

	ret = v4l2_async_register_subdev(&csi2rx->subdev);
	if (ret < 0)
		goto err_cleanup;

	dev_info(&pdev->dev,
		 "Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n",
		 csi2rx->num_lanes, csi2rx->max_lanes, csi2rx->max_streams,
		 csi2rx->has_internal_dphy ? "internal" : "no");

	return 0;

err_cleanup:
	/* The notifier was registered in csi2rx_parse_dt(), undo both steps. */
	v4l2_async_nf_unregister(&csi2rx->notifier);
	v4l2_async_nf_cleanup(&csi2rx->notifier);
err_free_priv:
	kfree(csi2rx);
	return ret;
}

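/* Undo csi2rx_probe(): unregister everything and free the driver state. */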
static int csi2rx_remove(struct platform_device *pdev)
{
	struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);

	v4l2_async_nf_unregister(&csi2rx->notifier);
	v4l2_async_nf_cleanup(&csi2rx->notifier);
	v4l2_async_unregister_subdev(&csi2rx->subdev);
	kfree(csi2rx);

	return 0;
}

static const struct of_device_id csi2rx_of_table[] = {
	{ .compatible = "cdns,csi2rx" },
	{ },
};
MODULE_DEVICE_TABLE(of, csi2rx_of_table);

static struct platform_driver csi2rx_driver = {
	.probe	= csi2rx_probe,
	.remove	= csi2rx_remove,

	.driver	= {
		.name		= "cdns-csi2rx",
		.of_match_table	= csi2rx_of_table,
	},
};
module_platform_driver(csi2rx_driver);
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
MODULE_DESCRIPTION("Cadence CSI2-RX controller");
MODULE_LICENSE("GPL");