#ifndef _UAPI_LINUX_VIRTIO_RING_H
#define _UAPI_LINUX_VIRTIO_RING_H
/* An interface for efficient virtio implementation, currently for use by KVM,
 * but hopefully others soon.  Do NOT change this since it will
 * break existing servers and clients.
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright Rusty Russell IBM Corporation 2007. */
#ifndef __KERNEL__
#include <stdint.h>
#endif
#include <linux/types.h>
#include <linux/virtio_types.h>

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4

/*
 * Mark a descriptor as available or used in packed ring.
 * Notice: they are defined as shifts instead of shifted values.
 */
#define VRING_PACKED_DESC_F_AVAIL 7
#define VRING_PACKED_DESC_F_USED 15
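/*
 * Since the two constants above are bit positions rather than masks, users
 * shift them before testing.  As an illustrative sketch (not part of this
 * header's ABI), a device-side check for whether a packed descriptor is
 * currently available might look like this, where wrap_ctr is the caller's
 * current ring wrap counter and flags is the descriptor's flags field after
 * conversion to CPU byte order:
 *
 *	int avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
 *	int used  = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
 *	int is_available = (avail != used) && (avail == wrap_ctr);
 */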

/* The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer.  It's unreliable, so it's simply an optimization.  Guest
 * will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY 1
/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer.  It's unreliable, so it's simply an
 * optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1

/* Enable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
/* Disable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1
/*
 * Enable events for a specific descriptor in packed ring.
 * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
 * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
 */
#define VRING_PACKED_EVENT_FLAG_DESC 0x2

/*
 * Wrap counter bit shift in event suppression structure
 * of packed ring.
 */
#define VRING_PACKED_EVENT_F_WRAP_CTR 15

/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC 28

/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring.  Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring.  Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX 29

/* Alignment requirements for vring elements.
 * When using pre-virtio 1.0 layout, these fall out naturally.
 */
#define VRING_AVAIL_ALIGN_SIZE 2
#define VRING_USED_ALIGN_SIZE 4
#define VRING_DESC_ALIGN_SIZE 16

/* Virtio ring descriptors: 16 bytes.  These can chain together via "next". */
struct vring_desc {
	/* Address (guest-physical). */
	__virtio64 addr;
	/* Length. */
	__virtio32 len;
	/* The flags as indicated above. */
	__virtio16 flags;
	/* We chain unused descriptors via this, too */
	__virtio16 next;
};
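
/*
 * As a minimal illustrative sketch (not part of this header), a split-ring
 * driver might chain two descriptors for a request with a driver-written
 * header followed by a device-written response.  Here hdr_addr/hdr_len and
 * resp_addr/resp_len are guest-physical addresses and lengths supplied by
 * the caller, and desc points at the descriptor table:
 *
 *	desc[0].addr  = hdr_addr;
 *	desc[0].len   = hdr_len;
 *	desc[0].flags = VRING_DESC_F_NEXT;
 *	desc[0].next  = 1;
 *
 *	desc[1].addr  = resp_addr;
 *	desc[1].len   = resp_len;
 *	desc[1].flags = VRING_DESC_F_WRITE;
 *	desc[1].next  = 0;
 *
 * (Conversion to the __virtio16/__virtio32/__virtio64 byte order is omitted
 * here for brevity.)
 */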

struct vring_avail {
	__virtio16 flags;
	__virtio16 idx;
	__virtio16 ring[];
};

/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
	/* Index of start of used descriptor chain. */
	__virtio32 id;
	/* Total length of the descriptor chain which was used (written to) */
	__virtio32 len;
};

typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
	vring_used_elem_t;

struct vring_used {
	__virtio16 flags;
	__virtio16 idx;
	vring_used_elem_t ring[];
};

/*
 * The ring element addresses are passed between components with different
 * alignment assumptions.  Thus, we might need to decrease the compiler-selected
 * alignment, and so must use a typedef to make sure the aligned attribute
 * actually takes hold:
 *
 * https://gcc.gnu.org/onlinedocs//gcc/Common-Type-Attributes.html#Common-Type-Attributes
 *
 * When used on a struct, or struct member, the aligned attribute can only
 * increase the alignment; in order to decrease it, the packed attribute must
 * be specified as well.  When used as part of a typedef, the aligned attribute
 * can both increase and decrease alignment, and specifying the packed
 * attribute generates a warning.
 */
typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
	vring_desc_t;
typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
	vring_avail_t;
typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
	vring_used_t;

struct vring {
	unsigned int num;

	vring_desc_t *desc;

	vring_avail_t *avail;

	vring_used_t *used;
};

#ifndef VIRTIO_RING_NO_LEGACY

/* The standard layout for the ring is a contiguous chunk of memory which looks
 * like this.  We assume num is a power of 2.
 *
 * struct vring
 * {
 *	// The actual descriptors (16 bytes each)
 *	struct vring_desc desc[num];
 *
 *	// A ring of available descriptor heads with free-running index.
 *	__virtio16 avail_flags;
 *	__virtio16 avail_idx;
 *	__virtio16 available[num];
 *	__virtio16 used_event_idx;
 *
 *	// Padding to the next align boundary.
 *	char pad[];
 *
 *	// A ring of used descriptor heads with free-running index.
 *	__virtio16 used_flags;
 *	__virtio16 used_idx;
 *	struct vring_used_elem used[num];
 *	__virtio16 avail_event_idx;
 * };
 */
/* We publish the used event index at the end of the available ring, and vice
 * versa.  They are at the end for backwards compatibility. */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
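/*
 * A hedged usage sketch (assuming VIRTIO_RING_F_EVENT_IDX was negotiated and
 * byte-order conversion is handled by the caller): a driver that has consumed
 * used entries up to last_used_idx can ask to be interrupted only once the
 * device moves past that point, instead of toggling avail->flags:
 *
 *	vring_used_event(&vr) = last_used_idx;
 *	// memory barrier here, then re-check vr.used->idx to close the race
 *
 * Symmetrically, a device can publish vring_avail_event(&vr) to tell the
 * driver which avail index should trigger the next kick.
 */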

static inline void vring_init(struct vring *vr, unsigned int num, void *p,
			      unsigned long align)
{
	vr->num = num;
	vr->desc = p;
	vr->avail = (struct vring_avail *)((char *)p + num * sizeof(struct vring_desc));
	vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
		+ align-1) & ~(align - 1));
}
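
/*
 * A minimal userspace usage sketch (illustrative only; vring_size() is
 * declared just below, and the queue size and 4096-byte alignment are
 * assumptions, not requirements of this header; needs <stdlib.h> and
 * <string.h>):
 *
 *	struct vring vr;
 *	unsigned int num = 256;		// must be a power of 2
 *	void *p;
 *
 *	if (posix_memalign(&p, 4096, vring_size(num, 4096)))
 *		return -1;
 *	memset(p, 0, vring_size(num, 4096));
 *	vring_init(&vr, num, p, 4096);
 */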

static inline unsigned vring_size(unsigned int num, unsigned long align)
{
	return ((sizeof(struct vring_desc) * num + sizeof(__virtio16) * (3 + num)
		 + align - 1) & ~(align - 1))
		+ sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
}
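
/*
 * Worked example of the arithmetic above (assuming num = 256 and
 * align = 4096): the descriptor table plus avail ring take
 * 16 * 256 + 2 * (3 + 256) = 4096 + 518 = 4614 bytes, which rounds up to
 * 8192; the used ring then adds 2 * 3 + 8 * 256 = 2054 bytes, for a total
 * of 10246 bytes.
 */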

#endif /* VIRTIO_RING_NO_LEGACY */

/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	/* Note: Xen has similar logic for notification hold-off
	 * in include/xen/interface/io/ring.h with req_event and req_prod
	 * corresponding to event_idx + 1 and new_idx respectively.
	 * Note also that req_event and req_prod in Xen start at 1,
	 * event indexes in virtio start at 0. */
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
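
/*
 * Worked example (illustrative numbers only): if we moved the index from
 * old = 5 to new_idx = 8, then event_idx = 6 lies in the range we just
 * covered, so (__u16)(8 - 6 - 1) = 1 < (__u16)(8 - 5) = 3 and we notify.
 * With event_idx = 2 the event point was already crossed before this batch,
 * (__u16)(8 - 2 - 1) = 5 is not < 3, and no event is needed.  The __u16
 * casts keep the comparison correct across index wrap-around.
 */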

struct vring_packed_desc_event {
	/* Descriptor Ring Change Event Offset/Wrap Counter. */
	__le16 off_wrap;
	/* Descriptor Ring Change Event Flags. */
	__le16 flags;
};
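
/*
 * Illustrative sketch (assuming VIRTIO_RING_F_EVENT_IDX was negotiated): to
 * request an event for a specific packed-ring descriptor, the low 15 bits of
 * off_wrap carry the descriptor offset and the top bit carries the wrap
 * counter, with flags set to VRING_PACKED_EVENT_FLAG_DESC.  For a host-endian
 * offset "off" and wrap counter "wrap" (0 or 1):
 *
 *	__u16 off_wrap = off | (wrap << VRING_PACKED_EVENT_F_WRAP_CTR);
 *	// then store it into the structure as little-endian
 *	// (e.g. htole16() in userspace)
 */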

struct vring_packed_desc {
	/* Buffer Address. */
	__le64 addr;
	/* Buffer Length. */
	__le32 len;
	/* Buffer ID. */
	__le16 id;
	/* The flags depending on descriptor type. */
	__le16 flags;
};
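
/*
 * Illustrative driver-side sketch (not part of this header): to make a packed
 * descriptor available for a device-writable buffer, fill in addr/len/id
 * first and write flags last, with the AVAIL bit equal to the driver's
 * current wrap counter and the USED bit set to its inverse; a write memory
 * barrier must separate the two steps so the device never sees a valid flags
 * word before the rest of the descriptor.  buf_addr/buf_len/buf_id and
 * wrap_ctr (0 or 1) are caller-supplied:
 *
 *	desc->addr = htole64(buf_addr);
 *	desc->len  = htole32(buf_len);
 *	desc->id   = htole16(buf_id);
 *	// write memory barrier here
 *	desc->flags = htole16(VRING_DESC_F_WRITE |
 *			      (wrap_ctr << VRING_PACKED_DESC_F_AVAIL) |
 *			      (!wrap_ctr << VRING_PACKED_DESC_F_USED));
 */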

#endif /* _UAPI_LINUX_VIRTIO_RING_H */