/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>

/* The virtio guest is communicating with a virtual "device" that actually
 * runs on a host processor.  Memory barriers are used to control SMP
 * effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio does not use). */
#define virtio_mb() smp_mb()
#define virtio_rmb() smp_rmb()
#define virtio_wmb() smp_wmb()
#else
/* We must force memory ordering even if the guest is UP, since the host
 * could be running on another CPU.  But SMP barriers are defined to
 * barrier() in that configuration, so fall back to mandatory barriers. */
#define virtio_mb() mb()
#define virtio_rmb() rmb()
#define virtio_wmb() wmb()
#endif
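
/*
 * How the barriers pair up below (a sketch of this file's own usage, not
 * extra code): the producer publishes ring entries before bumping the
 * index, and the consumer reads the index before reading the entries.
 *
 *	producer (virtqueue_kick)	consumer (virtqueue_get_buf)
 *	write avail->ring[...]		read used->idx
 *	virtio_wmb()			virtio_rmb()
 *	write avail->idx		read used->ring[...]
 */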

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks. */
	void *data[];
};
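
/*
 * Free descriptors are threaded through the descriptor table itself:
 * free_head indexes the first free descriptor, and each free entry's
 * ->next field points at the following one.  For a fresh 4-entry ring:
 *
 *	free_head = 0, desc[0].next = 1, desc[1].next = 2, desc[2].next = 3
 *
 * virtqueue_add_buf_gfp() takes descriptors from the front of this list;
 * detach_buf() pushes a completed chain back onto the front.
 */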

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}
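
/*
 * Resulting layout for out = 1, in = 1 (a sketch): the ring consumes a
 * single descriptor, which points at the kmalloc'ed table holding the
 * real chain:
 *
 *	vring.desc[head]: addr = table, len = 2 * sizeof(struct vring_desc),
 *			  flags = VRING_DESC_F_INDIRECT
 *	table[0] (out):   flags = VRING_DESC_F_NEXT,  next = 1
 *	table[1] (in):    flags = VRING_DESC_F_WRITE, next = 0
 */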

int virtqueue_add_buf_gfp(struct virtqueue *_vq,
			  struct scatterlist sg[],
			  unsigned int out,
			  unsigned int in,
			  void *data,
			  gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync).  FIXME: avoid modulus here? */
	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
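
/*
 * Typical producer-side usage from a driver (an illustrative sketch;
 * 'vq' and 'req' are hypothetical, and a real driver would requeue on a
 * full ring rather than simply failing):
 *
 *	struct scatterlist sg[1];
 *
 *	sg_init_one(sg, req, sizeof(*req));
 *	if (virtqueue_add_buf_gfp(vq, sg, 1, 0, req, GFP_ATOMIC) < 0)
 *		return -ENOSPC;		(ring full: back off)
 *	virtqueue_kick(vq);
 *
 * The buffer must stay allocated until virtqueue_get_buf() hands the
 * 'req' token back.
 */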

void virtqueue_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb();

	vq->vring.avail->idx += vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	virtio_mb();

	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */
		vq->notify(&vq->vq);

	END_USE(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb();

	i = vq->vring.used->ring[vq->last_used_idx % vq->vring.num].id;
	*len = vq->vring.used->ring[vq->last_used_idx % vq->vring.num].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
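
/*
 * Typical consumer-side usage, usually from the virtqueue callback (an
 * illustrative sketch; complete_request() is hypothetical):
 *
 *	static void my_callback(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *buf;
 *
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			complete_request(buf, len);
 *	}
 */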

void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	virtio_mb();
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
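
/*
 * These two are typically used to switch to polling and then close the
 * race on the way back to interrupts (a sketch; process() is
 * hypothetical, and a real driver would re-disable callbacks before
 * polling again):
 *
 *	virtqueue_disable_cb(vq);
 *	do {
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			process(buf);
 *	} while (!virtqueue_enable_cb(vq));
 *
 * A false return from virtqueue_enable_cb() means a buffer was used
 * between the final get_buf and re-enabling callbacks, so the loop
 * polls once more instead of relying on an interrupt.
 */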

void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
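
/*
 * Transports route their interrupt source here; e.g. a PCI transport
 * might register it directly (a sketch, with hypothetical variables):
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  dev_name(&vdev->dev), vq);
 */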

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
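
/*
 * The transport allocates the ring memory itself and hands it in via
 * 'pages'; e.g. (a sketch with simplified error handling; the alignment
 * constant is the transport's own, here the PCI one):
 *
 *	size = vring_size(num, VIRTIO_PCI_VRING_ALIGN);
 *	pages = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
 *	vq = vring_new_virtqueue(num, VIRTIO_PCI_VRING_ALIGN, vdev,
 *				 pages, notify, callback, name);
 *
 * The memory must be physically contiguous and zeroed, since the other
 * side accesses the ring by physical address.
 */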

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Accept only the transport feature bits this ring implementation
 * understands; clear the rest. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
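
/*
 * A transport typically calls this from its finalize_features hook so
 * that ring feature bits it does not itself handle are vetted here (a
 * sketch; the function name is illustrative):
 *
 *	static void my_finalize_features(struct virtio_device *vdev)
 *	{
 *		Give virtio_ring a chance to accept features:
 *		vring_transport_features(vdev);
 *	}
 */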

MODULE_LICENSE("GPL");