// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
				      struct v4l2_subdev *subdev,
				      struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

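/* Match an async subdev descriptor by I2C adapter number and client address. */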
static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

static bool
match_fwnode_one(struct v4l2_async_notifier *notifier,
		 struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
		 struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/*
	 * Both the subdev and the async subdev can provide either an endpoint
	 * fwnode or a device fwnode. Start with the simple case of direct
	 * fwnode matching.
	 */
	if (sd_fwnode == asd->match.fwnode)
		return true;

	/*
	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
	 * endpoint or a device. If they're of the same type, there's no match.
	 * Technically speaking this checks if the nodes refer to a connected
	 * endpoint, which is the simplest check that works for both OF and
	 * ACPI. This won't make a difference, as drivers should not try to
	 * match unconnected endpoints.
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd_fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * The sd and asd fwnodes are of different types. Get the device fwnode
	 * parent of the endpoint fwnode, and compare it with the other fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd_fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd_fwnode;
	}

	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * We have a heterogeneous match. Retrieve the struct device of the side
	 * that matched on a device fwnode to print its driver name.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		    : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}

static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (match_fwnode_one(notifier, sd, sd->fwnode, asd))
		return true;

	/* Also check the secondary fwnode. */
	if (IS_ERR_OR_NULL(sd->fwnode->secondary))
		return false;

	return match_fwnode_one(notifier, sd, sd->fwnode->secondary, asd);
}

static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

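/*
 * Return the first descriptor on the notifier's waiting list that matches
 * @sd, or NULL if there is no match.
 */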
static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* match_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_nf_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_nf_can_complete(notifier))
		return 0;

	return v4l2_async_nf_call_complete(notifier);
}

static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);

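/*
 * Create an ancillary media link from the notifier's sub-device to @sd when
 * the Media Controller is enabled and @sd is a lens or flash entity.
 */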
static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
					     struct v4l2_subdev *sd)
{
	struct media_link *link = NULL;

#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)

	if (sd->entity.function != MEDIA_ENT_F_LENS &&
	    sd->entity.function != MEDIA_ENT_F_FLASH)
		return 0;

	link = media_create_ancillary_link(&n->sd->entity, &sd->entity);

#endif

	return IS_ERR(link) ? PTR_ERR(link) : 0;
}

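/*
 * Bind a matched sub-device: register it with the v4l2_device, call the
 * notifier's bound op, create ancillary links and move the descriptor off the
 * waiting list. If the sub-device has registered a notifier of its own,
 * attach it to the tree and try its async sub-devices as well.
 */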
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_nf_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/*
	 * Depending on the function of the entities involved, we may want to
	 * create links between them (for example between a sensor and its lens
	 * or between a sensor's source pad and the connected device's sink
	 * pad).
	 */
	ret = v4l2_async_create_ancillary_links(notifier, sd);
	if (ret) {
		v4l2_async_nf_call_unbind(notifier, sd, asd);
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_nf_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_nf_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_nf_unbind_all_subdevs(subdev_notifier);

		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
				 struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
			       struct v4l2_async_subdev *asd, int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_nf_has_async_subdev(notifier, asd))
			return true;

	return false;
}

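/*
 * Validate an async subdev descriptor: check its match type and make sure the
 * same descriptor is not already present in this or any other notifier.
 */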
static int v4l2_async_nf_asd_valid(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd,
				   int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_nf_has_async_subdev(notifier, asd, this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_nf_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_nf_init);

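/*
 * Queue the notifier's async subdev descriptors on its waiting list, try to
 * match and bind every already-registered sub-device, and attempt to complete
 * the notifier tree. Expects notifier->v4l2_dev or notifier->sd to be set.
 */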
static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_nf_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_nf_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_nf_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Also keep completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_nf_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

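/*
 * A minimal bridge-driver usage sketch (not taken from any particular driver;
 * the my_* identifiers, "ep" and "v4l2_dev" are illustrative assumptions, and
 * drivers normally reach these helpers through the wrappers declared in
 * media/v4l2-async.h):
 *
 *	static const struct v4l2_async_notifier_operations my_async_ops = {
 *		.bound = my_bound,
 *		.complete = my_complete,
 *	};
 *
 *	v4l2_async_nf_init(&my_notifier);
 *	my_notifier.ops = &my_async_ops;
 *	asd = __v4l2_async_nf_add_fwnode_remote(&my_notifier, ep,
 *						sizeof(struct v4l2_async_subdev));
 *	if (IS_ERR(asd))
 *		return PTR_ERR(asd);
 *	ret = v4l2_async_nf_register(v4l2_dev, &my_notifier);
 *	if (ret)
 *		v4l2_async_nf_cleanup(&my_notifier);
 */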
int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
			   struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_nf_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_nf_register);

int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
				  struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_nf_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_nf_register);

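/*
 * Unbind all sub-devices bound through @notifier, detach it from its
 * v4l2_device or parent sub-device and drop it from the global notifier list.
 * Called with list_lock held.
 */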
static void
__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_nf_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_nf_unregister);

static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier || !notifier->asd_list.next)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		kfree(asd);
	}
}

void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);

int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
			       struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_nf_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_subdev);

struct v4l2_async_subdev *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
			   struct fwnode_handle *fwnode,
			   unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode_handle_get(fwnode);

	ret = __v4l2_async_nf_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);

struct v4l2_async_subdev *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
				  struct fwnode_handle *endpoint,
				  unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	struct fwnode_handle *remote;

	remote = fwnode_graph_get_remote_endpoint(endpoint);
	if (!remote)
		return ERR_PTR(-ENOTCONN);

	asd = __v4l2_async_nf_add_fwnode(notif, remote, asd_struct_size);
	/*
	 * Calling __v4l2_async_nf_add_fwnode grabs a refcount,
	 * so drop the one we got in fwnode_graph_get_remote_endpoint().
	 */
	fwnode_handle_put(remote);
	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);

struct v4l2_async_subdev *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
			unsigned short address, unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = __v4l2_async_nf_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);

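/*
 * Register a sub-device with the async core: try to match it against every
 * registered notifier and, if nothing matches yet, park it on the global
 * subdev_list until a matching notifier shows up.
 *
 * A minimal sketch of the sensor-driver side, assuming an I2C probe() that
 * has already set up "sd" via v4l2_i2c_subdev_init() (identifiers
 * illustrative):
 *
 *	ret = v4l2_async_register_subdev(sd);
 *	if (ret)
 *		goto err_cleanup;
 */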
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_nf_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_nf_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_nf_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	if (!sd->async_list.next)
		return;

	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(sd->subdev_notifier);
	__v4l2_async_nf_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);

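/* debugfs helper: print a single entry from a notifier's waiting list. */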
static void print_waiting_subdev(struct seq_file *s,
				 struct v4l2_async_subdev *asd)
{
	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
			   asd->match.i2c.address);
		break;
	case V4L2_ASYNC_MATCH_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;

		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}

static const char *
v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
{
	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->name;
	else if (notifier->sd)
		return notifier->sd->name;
	else
		return "nil";
}

static int pending_subdevs_show(struct seq_file *s, void *data)
{
	struct v4l2_async_notifier *notif;
	struct v4l2_async_subdev *asd;

	mutex_lock(&list_lock);

	list_for_each_entry(notif, &notifier_list, list) {
		seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
		list_for_each_entry(asd, &notif->waiting, list)
			print_waiting_subdev(s, asd);
	}

	mutex_unlock(&list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);

static struct dentry *v4l2_async_debugfs_dir;

static int __init v4l2_async_init(void)
{
	v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
	debugfs_create_file("pending_async_subdevices", 0444,
			    v4l2_async_debugfs_dir, NULL,
			    &pending_subdevs_fops);

	return 0;
}

static void __exit v4l2_async_exit(void)
{
	debugfs_remove_recursive(v4l2_async_debugfs_dir);
}

subsys_initcall(v4l2_async_init);
module_exit(v4l2_async_exit);

MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_LICENSE("GPL");