// SPDX-License-Identifier: GPL-2.0-only
/*
 * Remote processor messaging transport (OMAP platform-specific bits)
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 */

#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>

#include "remoteproc_internal.h"

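/*
 * Copy the parent's DMA range map so that DMA addresses set up for the vdev
 * translate the same way as they do on the remoteproc parent device.
 */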
static int copy_dma_range_map(struct device *to, struct device *from)
{
	const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
	int num_ranges = 0;

	if (!map)
		return 0;

	for (r = map; r->size; r++)
		num_ranges++;

	new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
			  GFP_KERNEL);
	if (!new_map)
		return -ENOMEM;
	to->dma_range_map = new_map;
	return 0;
}

static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
	struct platform_device *pdev;

	pdev = container_of(vdev->dev.parent, struct platform_device, dev);

	return platform_get_drvdata(pdev);
}

static struct rproc *vdev_to_rproc(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

	return rvdev->rproc;
}

/* kick the remote processor, and let it know which virtqueue to poke at */
static bool rproc_virtio_notify(struct virtqueue *vq)
{
	struct rproc_vring *rvring = vq->priv;
	struct rproc *rproc = rvring->rvdev->rproc;
	int notifyid = rvring->notifyid;

	dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);

	rproc->ops->kick(rproc, notifyid);
	return true;
}
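
/*
 * Illustrative sketch (not part of this file) of what a platform driver's
 * .kick op typically does with the notify id passed above; struct my_rproc
 * and my_mbox_send() are hypothetical placeholders:
 *
 *	static void my_rproc_kick(struct rproc *rproc, int vqid)
 *	{
 *		struct my_rproc *priv = rproc->priv;
 *
 *		// tell the remote core which virtqueue has new buffers
 *		my_mbox_send(priv, vqid);
 *	}
 */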

/**
 * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
 * @rproc: handle to the remote processor
 * @notifyid: index of the signalled virtqueue (unique per this @rproc)
 *
 * This function should be called by the platform-specific rproc driver,
 * when the remote processor signals that a specific virtqueue has pending
 * messages available.
 *
 * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
 * IRQ_HANDLED otherwise.
 */
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
	struct rproc_vring *rvring;

	dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);

	rvring = idr_find(&rproc->notifyids, notifyid);
	if (!rvring || !rvring->vq)
		return IRQ_NONE;

	return vring_interrupt(0, rvring->vq);
}
EXPORT_SYMBOL(rproc_vq_interrupt);
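
/*
 * Illustrative usage sketch (assumptions: a hypothetical platform driver
 * whose mailbox callback receives the notify id sent by the remote core):
 *
 *	static void my_rproc_mbox_callback(struct mbox_client *cl, void *data)
 *	{
 *		struct rproc *rproc = dev_get_drvdata(cl->dev);
 *		int notifyid = (int)(unsigned long)data;
 *
 *		if (rproc_vq_interrupt(rproc, notifyid) == IRQ_NONE)
 *			dev_dbg(cl->dev, "no message in vq %d\n", notifyid);
 *	}
 */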

static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
				    unsigned int id,
				    void (*callback)(struct virtqueue *vq),
				    const char *name, bool ctx)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);
	struct device *dev = &rproc->dev;
	struct rproc_mem_entry *mem;
	struct rproc_vring *rvring;
	struct fw_rsc_vdev *rsc;
	struct virtqueue *vq;
	void *addr;
	int num, size;

	/* we're temporarily limited to two virtqueues per rvdev */
	if (id >= ARRAY_SIZE(rvdev->vring))
		return ERR_PTR(-EINVAL);

	if (!name)
		return NULL;

	/* Search allocated memory region by name */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  id);
	if (!mem || !mem->va)
		return ERR_PTR(-ENOMEM);

	rvring = &rvdev->vring[id];
	addr = mem->va;
	num = rvring->num;

	/* zero vring */
	size = vring_size(num, rvring->align);
	memset(addr, 0, size);

	dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
		id, addr, num, rvring->notifyid);

	/*
	 * Create the new vq, and tell virtio we're not interested in
	 * the 'weak' smp barriers, since we're talking with a real device.
	 */
	vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
				 addr, rproc_virtio_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue %s failed\n", name);
		rproc_free_vring(rvring);
		return ERR_PTR(-ENOMEM);
	}

	vq->num_max = num;

	rvring->vq = vq;
	vq->priv = rvring;

	/* Update vring in resource table */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[id].da = mem->da;

	return vq;
}

static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct rproc_vring *rvring;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		rvring = vq->priv;
		rvring->vq = NULL;
		vring_del_virtqueue(vq);
	}
}

static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	__rproc_virtio_del_vqs(vdev);
}

static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
				 struct virtqueue *vqs[],
				 vq_callback_t *callbacks[],
				 const char * const names[],
				 const bool * ctx,
				 struct irq_affinity *desc)
{
	int i, ret, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
				    ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	return 0;

error:
	__rproc_virtio_del_vqs(vdev);
	return ret;
}

static u8 rproc_virtio_get_status(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->status;
}

static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = status;
	dev_dbg(&vdev->dev, "status: %d\n", status);
}

static void rproc_virtio_reset(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = 0;
	dev_dbg(&vdev->dev, "reset !\n");
}

/* provide the vdev features as retrieved from the firmware */
static u64 rproc_virtio_get_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->dfeatures;
}

static void rproc_transport_features(struct virtio_device *vdev)
{
	/*
	 * Packed ring isn't enabled on remoteproc for now,
	 * because remoteproc uses vring_new_virtqueue() which
	 * creates virtio rings on preallocated memory.
	 */
	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}

static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	/* Give virtio_ring a chance to accept features */
	vring_transport_features(vdev);

	/* Give virtio_rproc a chance to accept features. */
	rproc_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/*
	 * Remember the finalized features of our vdev, and provide it
	 * to the remote processor once it is powered on.
	 */
	rsc->gfeatures = vdev->features;

	return 0;
}

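/*
 * The vdev config space, if any, lives in the resource table immediately
 * after the vring descriptors of this fw_rsc_vdev entry; it is config_len
 * bytes long and shared with the remote processor.
 */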
static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
			     void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
		return;
	}

	memcpy(buf, cfg + offset, len);
}

static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
			     const void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
		return;
	}

	memcpy(cfg + offset, buf, len);
}

static const struct virtio_config_ops rproc_virtio_config_ops = {
	.get_features	= rproc_virtio_get_features,
	.finalize_features = rproc_virtio_finalize_features,
	.find_vqs	= rproc_virtio_find_vqs,
	.del_vqs	= rproc_virtio_del_vqs,
	.reset		= rproc_virtio_reset,
	.set_status	= rproc_virtio_set_status,
	.get_status	= rproc_virtio_get_status,
	.get		= rproc_virtio_get,
	.set		= rproc_virtio_set,
};

/*
 * This function is called whenever vdev is released, and is responsible
 * for decrementing the remote processor's refcount which was taken when
 * vdev was added.
 *
 * Never call this function directly; it will be called by the driver
 * core when needed.
 */
static void rproc_virtio_dev_release(struct device *dev)
{
	struct virtio_device *vdev = dev_to_virtio(dev);
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

	kfree(vdev);

	put_device(&rvdev->pdev->dev);
}

/**
 * rproc_add_virtio_dev() - register an rproc-induced virtio device
 * @rvdev: the remote vdev
 * @id: the device type identification (used to match it with a driver).
 *
 * This function registers a virtio device. This vdev's parent is
 * the rproc device.
 *
 * Return: 0 on success or an appropriate error value otherwise
 */
static int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rvdev->pdev->dev;
	struct virtio_device *vdev;
	struct rproc_mem_entry *mem;
	int ret;

	if (rproc->ops->kick == NULL) {
		ret = -EINVAL;
		dev_err(dev, ".kick method not defined for %s\n", rproc->name);
		goto out;
	}

	/* Try to find dedicated vdev buffer carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
	if (mem) {
		phys_addr_t pa;

		if (mem->of_resm_idx != -1) {
			struct device_node *np = rproc->dev.parent->of_node;

			/* Associate reserved memory to vdev device */
			ret = of_reserved_mem_device_init_by_idx(dev, np,
								 mem->of_resm_idx);
			if (ret) {
				dev_err(dev, "Can't associate reserved memory\n");
				goto out;
			}
		} else {
			if (mem->va) {
				dev_warn(dev, "vdev %d buffer already mapped\n",
					 rvdev->index);
				pa = rproc_va_to_pa(mem->va);
			} else {
				/* Use the dma address, as the carveout is not mapped yet */
				pa = (phys_addr_t)mem->dma;
			}

			/* Associate vdev buffer memory pool to vdev subdev */
			ret = dma_declare_coherent_memory(dev, pa,
							   mem->da,
							   mem->len);
			if (ret < 0) {
				dev_err(dev, "Failed to associate buffer\n");
				goto out;
			}
		}
	} else {
		struct device_node *np = rproc->dev.parent->of_node;

		/*
		 * If we don't have a dedicated buffer, just attempt to re-assign
		 * the reserved memory from our parent. A default memory-region
		 * at index 0 from the parent's memory-regions is assigned for
		 * the rvdev dev to allocate from. Failure is non-critical and
		 * the allocations will fall back to global pools, so don't
		 * check the return value either.
		 */
		of_reserved_mem_device_init_by_idx(dev, np, 0);
	}

	/* Allocate virtio device */
	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out;
	}
	vdev->id.device	= id,
	vdev->config = &rproc_virtio_config_ops,
	vdev->dev.parent = dev;
	vdev->dev.release = rproc_virtio_dev_release;

	/* Reference the vdev and vring allocations */
	get_device(dev);

	ret = register_virtio_device(vdev);
	if (ret) {
		put_device(&vdev->dev);
		dev_err(dev, "failed to register vdev: %d\n", ret);
		goto out;
	}

	dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);

out:
	return ret;
}

/**
 * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
 * @dev: the virtio device
 * @data: must be null
 *
 * This function unregisters an existing virtio device.
 *
 * Return: 0
 */
static int rproc_remove_virtio_dev(struct device *dev, void *data)
{
	struct virtio_device *vdev = dev_to_virtio(dev);

	unregister_virtio_device(vdev);
	return 0;
}

static int rproc_vdev_do_start(struct rproc_subdev *subdev)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);

	return rproc_add_virtio_dev(rvdev, rvdev->id);
}

static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
	struct device *dev = &rvdev->pdev->dev;
	int ret;

	ret = device_for_each_child(dev, NULL, rproc_remove_virtio_dev);
	if (ret)
		dev_warn(dev, "can't remove vdev child device: %d\n", ret);
}

static int rproc_virtio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rproc_vdev_data *rvdev_data = dev->platform_data;
	struct rproc_vdev *rvdev;
	struct rproc *rproc = container_of(dev->parent, struct rproc, dev);
	struct fw_rsc_vdev *rsc;
	int i, ret;

	if (!rvdev_data)
		return -EINVAL;

	rvdev = devm_kzalloc(dev, sizeof(*rvdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	rvdev->id = rvdev_data->id;
	rvdev->rproc = rproc;
	rvdev->index = rvdev_data->index;

	ret = copy_dma_range_map(dev, rproc->dev.parent);
	if (ret)
		return ret;

	/* Make device dma capable by inheriting from parent's capabilities */
	set_dma_ops(dev, get_dma_ops(rproc->dev.parent));

	ret = dma_coerce_mask_and_coherent(dev, dma_get_mask(rproc->dev.parent));
	if (ret) {
		dev_warn(dev, "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
			 dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
	}

	platform_set_drvdata(pdev, rvdev);
	rvdev->pdev = pdev;

	rsc = rvdev_data->rsc;

	/* parse the vrings */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_parse_vring(rvdev, rsc, i);
		if (ret)
			return ret;
	}

	/* remember the resource offset */
	rvdev->rsc_offset = rvdev_data->rsc_offset;

	/* allocate the vring resources */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_alloc_vring(rvdev, i);
		if (ret)
			goto unwind_vring_allocations;
	}

	rproc_add_rvdev(rproc, rvdev);

	rvdev->subdev.start = rproc_vdev_do_start;
	rvdev->subdev.stop = rproc_vdev_do_stop;

	rproc_add_subdev(rproc, &rvdev->subdev);

	/*
	 * We're indirectly making a non-temporary copy of the rproc pointer
	 * here, because the platform device or the vdev device will indirectly
	 * access the wrapping rproc.
	 *
	 * Therefore we must increment the rproc refcount here, and decrement
	 * it _only_ on platform remove.
	 */
	get_device(&rproc->dev);

	return 0;

unwind_vring_allocations:
	for (i--; i >= 0; i--)
		rproc_free_vring(&rvdev->vring[i]);

	return ret;
}

static void rproc_virtio_remove(struct platform_device *pdev)
{
	struct rproc_vdev *rvdev = dev_get_drvdata(&pdev->dev);
	struct rproc *rproc = rvdev->rproc;
	struct rproc_vring *rvring;
	int id;

	for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
		rvring = &rvdev->vring[id];
		rproc_free_vring(rvring);
	}

	rproc_remove_subdev(rproc, &rvdev->subdev);
	rproc_remove_rvdev(rvdev);

	of_reserved_mem_device_release(&pdev->dev);
	dma_release_coherent_memory(&pdev->dev);

	put_device(&rproc->dev);
}

/* Platform driver */
static struct platform_driver rproc_virtio_driver = {
	.probe		= rproc_virtio_probe,
	.remove_new	= rproc_virtio_remove,
	.driver		= {
		.name	= "rproc-virtio",
	},
};
builtin_platform_driver(rproc_virtio_driver);