// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

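/*
 * Wrappers that invoke the notifier callbacks only when the notifier
 * implements them; missing optional callbacks are treated as no-ops
 * (bound and complete then report success).
 */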
static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
				      struct v4l2_subdev *subdev,
				      struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n,
				       struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->destroy)
		return;

	n->ops->destroy(asd);
}

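/* Match a subdev against an I2C adapter/address async match descriptor. */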
static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

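/*
 * Compare one subdev fwnode with an async match descriptor. Either side may
 * refer to an endpoint or to the device itself, so endpoint-to-device
 * (heterogeneous) matches are handled as well.
 */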
static bool
match_fwnode_one(struct v4l2_async_notifier *notifier,
		 struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
		 struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/*
	 * Both the subdev and the async subdev can provide either an endpoint
	 * fwnode or a device fwnode. Start with the simple case of direct
	 * fwnode matching.
	 */
	if (sd_fwnode == asd->match.fwnode)
		return true;

	/*
	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
	 * endpoint or a device. If they're of the same type, there's no match.
	 * Technically speaking this checks if the nodes refer to a connected
	 * endpoint, which is the simplest check that works for both OF and
	 * ACPI. This won't make a difference, as drivers should not try to
	 * match unconnected endpoints.
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd_fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * The sd and asd fwnodes are of different types. Get the device fwnode
	 * parent of the endpoint fwnode, and compare it with the other fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd_fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd_fwnode;
	}

	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * We have a heterogeneous match. Retrieve the struct device of the side
	 * that matched on a device fwnode to print its driver name.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		    : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}

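/*
 * Match a subdev against a fwnode async match descriptor, checking both the
 * primary and, if present, the secondary fwnode of the subdev.
 */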
static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (match_fwnode_one(notifier, sd, sd->fwnode, asd))
		return true;

	/* Also check the secondary fwnode. */
	if (IS_ERR_OR_NULL(sd->fwnode->secondary))
		return false;

	return match_fwnode_one(notifier, sd, sd->fwnode->secondary, asd);
}

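/*
 * Global state: subdevs that are registered but not yet bound, all registered
 * notifiers, and the lock serialising access to both lists.
 */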
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

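/*
 * Return the first descriptor on the notifier's waiting list that matches the
 * given subdev, or NULL if there is none.
 */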
static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* match_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_nf_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_nf_can_complete(notifier))
		return 0;

	return v4l2_async_nf_call_complete(notifier);
}

static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);

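/*
 * Create an ancillary media link between the notifier's subdev and a bound
 * lens or flash subdev when the media controller is enabled; other entity
 * functions are left alone.
 */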
static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
					     struct v4l2_subdev *sd)
{
	struct media_link *link = NULL;

#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)

	if (sd->entity.function != MEDIA_ENT_F_LENS &&
	    sd->entity.function != MEDIA_ENT_F_FLASH)
		return 0;

	link = media_create_ancillary_link(&n->sd->entity, &sd->entity);

#endif

	return IS_ERR(link) ? PTR_ERR(link) : 0;
}

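/*
 * Bind a matched subdev: register it with the v4l2_device, call the
 * notifier's bound callback, create ancillary links and move the subdev to
 * the notifier's done list. If the subdev owns a notifier of its own, attach
 * it to the tree and try binding its async sub-devices too.
 */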
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_nf_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/*
	 * Depending on the function of the entities involved, we may want to
	 * create links between them (for example between a sensor and its lens
	 * or between a sensor's source pad and the connected device's sink
	 * pad).
	 */
	ret = v4l2_async_create_ancillary_links(notifier, sd);
	if (ret) {
		v4l2_async_nf_call_unbind(notifier, sd, asd);
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_nf_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_nf_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

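/* Unregister a subdev from its v4l2_device and reset its async state. */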
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_nf_unbind_all_subdevs(subdev_notifier);

		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
				 struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
			       struct v4l2_async_subdev *asd, int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_nf_has_async_subdev(notifier, asd))
			return true;

	return false;
}

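/*
 * Validate an async match descriptor: check its match type and make sure the
 * same descriptor is not already listed in this or another notifier.
 */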
static int v4l2_async_nf_asd_valid(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd,
				   int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_nf_has_async_subdev(notifier, asd, this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_nf_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_nf_init);

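/*
 * Common part of notifier registration: populate the waiting list from the
 * notifier's asd_list, try to bind already registered subdevs and complete
 * the notifier tree if everything is in place.
 */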
static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_nf_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_nf_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_nf_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_nf_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
			   struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_nf_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_nf_register);

int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
				  struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_nf_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_nf_register);

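/*
 * Unbind all sub-devices bound through this notifier and drop it from the
 * global notifier list. Must be called with list_lock held.
 */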
static void
__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_nf_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_nf_unregister);

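/*
 * Release every async match descriptor owned by the notifier: drop fwnode
 * references, invoke the destroy callback and free the descriptor. Must be
 * called with list_lock held.
 */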
static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier || !notifier->asd_list.next)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		v4l2_async_nf_call_destroy(notifier, asd);
		kfree(asd);
	}
}

void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);

int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
			       struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_nf_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_subdev);

struct v4l2_async_subdev *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
			   struct fwnode_handle *fwnode,
			   unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode_handle_get(fwnode);

	ret = __v4l2_async_nf_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);

struct v4l2_async_subdev *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
				  struct fwnode_handle *endpoint,
				  unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	struct fwnode_handle *remote;

	remote = fwnode_graph_get_remote_endpoint(endpoint);
	if (!remote)
		return ERR_PTR(-ENOTCONN);

	asd = __v4l2_async_nf_add_fwnode(notif, remote, asd_struct_size);
	/*
	 * Calling __v4l2_async_nf_add_fwnode() grabs a refcount of its own,
	 * so drop the one we got from fwnode_graph_get_remote_endpoint().
	 */
	fwnode_handle_put(remote);
	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);

struct v4l2_async_subdev *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
			unsigned short address, unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = __v4l2_async_nf_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);

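/*
 * Register a subdev with the async framework: try to match it against every
 * registered notifier and, failing that, park it on the global subdev_list
 * until a matching notifier shows up.
 */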
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_nf_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_nf_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_nf_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	if (!sd->async_list.next)
		return;

	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(sd->subdev_notifier);
	__v4l2_async_nf_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);

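/* Print one pending async match descriptor for the debugfs listing. */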
static void print_waiting_subdev(struct seq_file *s,
				 struct v4l2_async_subdev *asd)
{
	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
			   asd->match.i2c.address);
		break;
	case V4L2_ASYNC_MATCH_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;

		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}

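/* Return a human-readable name identifying the notifier's owner. */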
static const char *
v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
{
	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->name;
	else if (notifier->sd)
		return notifier->sd->name;
	else
		return "nil";
}

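/* debugfs: list the async sub-devices every registered notifier still waits for. */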
static int pending_subdevs_show(struct seq_file *s, void *data)
{
	struct v4l2_async_notifier *notif;
	struct v4l2_async_subdev *asd;

	mutex_lock(&list_lock);

	list_for_each_entry(notif, &notifier_list, list) {
		seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
		list_for_each_entry(asd, &notif->waiting, list)
			print_waiting_subdev(s, asd);
	}

	mutex_unlock(&list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);

static struct dentry *v4l2_async_debugfs_dir;

static int __init v4l2_async_init(void)
{
	v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
	debugfs_create_file("pending_async_subdevices", 0444,
			    v4l2_async_debugfs_dir, NULL,
			    &pending_subdevs_fops);

	return 0;
}

static void __exit v4l2_async_exit(void)
{
	debugfs_remove_recursive(v4l2_async_debugfs_dir);
}

subsys_initcall(v4l2_async_init);
module_exit(v4l2_async_exit);

MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_LICENSE("GPL");