/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

struct virtio_shm_region {
	u64 addr;
	u64 len;
};

/**
 * virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements all of the operations
 *       getting/setting a value as a simple read/write! Generally speaking,
 *       any of @get/@set, @get_status/@set_status, or @get_features/
 *       @finalize_features are NOT safe to be called from an atomic
 *       context.
 * @get: read the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to write the field value into.
 *	len: the length of the buffer
 * @set: write the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to read the field value from.
 *	len: the length of the buffer
 * @generation: config generation counter (optional)
 *	vdev: the virtio_device
 *	Returns the config generation counter
 * @get_status: read the status byte
 *	vdev: the virtio_device
 *	Returns the status byte
 * @set_status: write the status byte
 *	vdev: the virtio_device
 *	status: the new status byte
 * @reset: reset the device
 *	vdev: the virtio device
 *	After this, status and feature negotiation must be done again
 *	Device must not be reset from its vq/config callbacks, or in
 *	parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *	vdev: the virtio_device
 *	nvqs: the number of virtqueues to find
 *	vqs: on success, includes new virtqueues
 *	callbacks: array of callbacks, for each virtqueue
 *		include a NULL entry for vqs that do not need a callback
 *	names: array of virtqueue names (mainly for debugging)
 *		include a NULL entry for vqs unused by driver
 *	Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
 *	The function guarantees that all memory operations on the
 *	queue before it are visible to the vring_interrupt() that is
 *	called after it.
 *	vdev: the virtio_device
 * @get_features: get the array of feature bits for this device.
 *	vdev: the virtio_device
 *	Returns the first 64 feature bits (all we currently need).
 * @finalize_features: confirm what device features we'll be using.
 *	vdev: the virtio_device
 *	This sends the driver feature bits to the device: it can change
 *	the dev->feature bits if it wants.
 *	Note: despite the name this can be called any number of times.
 *	Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *	vdev: the virtio_device
 *	This returns a pointer to the bus name a la pci_name from which
 *	the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 */
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
	void (*get)(struct virtio_device *vdev, unsigned offset,
		    void *buf, unsigned len);
	void (*set)(struct virtio_device *vdev, unsigned offset,
		    const void *buf, unsigned len);
	u32 (*generation)(struct virtio_device *vdev);
	u8 (*get_status)(struct virtio_device *vdev);
	void (*set_status)(struct virtio_device *vdev, u8 status);
	void (*reset)(struct virtio_device *vdev);
	int (*find_vqs)(struct virtio_device *, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc);
	void (*del_vqs)(struct virtio_device *);
	void (*synchronize_cbs)(struct virtio_device *);
	u64 (*get_features)(struct virtio_device *vdev);
	int (*finalize_features)(struct virtio_device *vdev);
	const char *(*bus_name)(struct virtio_device *vdev);
	int (*set_vq_affinity)(struct virtqueue *vq,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
						 int index);
	bool (*get_shm_region)(struct virtio_device *vdev,
			       struct virtio_shm_region *region, u8 id);
};

/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *                     Devices should normally use virtio_has_feature,
 *                     which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
				     unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	return vdev->features & BIT_ULL(fbit);
}

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
				    unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features |= BIT_ULL(fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
				      unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features &= ~BIT_ULL(fbit);
}

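/*
 * Illustrative sketch (not part of this header): a transport typically uses
 * the bit helpers above from its ->finalize_features() hook to drop feature
 * bits it cannot support before acknowledging the rest.  The "foo" names
 * below are hypothetical.
 *
 *	static int foo_finalize_features(struct virtio_device *vdev)
 *	{
 *		// Drop a ring feature this transport cannot honour.
 *		if (!foo_supports_indirect(vdev))
 *			__virtio_clear_bit(vdev, VIRTIO_RING_F_INDIRECT_DESC);
 *		return 0;
 *	}
 */
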
/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
				      unsigned int fbit)
{
	if (fbit < VIRTIO_TRANSPORT_F_START)
		virtio_check_driver_offered_feature(vdev, fbit);

	return __virtio_test_bit(vdev, fbit);
}

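/*
 * Illustrative sketch (not part of this header): a driver checks negotiated
 * features after probe-time feature negotiation.  VIRTIO_NET_F_MQ and
 * struct virtio_net_config come from uapi/linux/virtio_net.h; the local
 * variable is just an example.
 *
 *	u16 max_pairs = 1;
 *
 *	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ))
 *		virtio_cread(vdev, struct virtio_net_config,
 *			     max_virtqueue_pairs, &max_pairs);
 */
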
/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
	/*
	 * Note the reverse polarity of the quirk feature (compared to most
	 * other features), this is for compatibility with legacy systems.
	 */
	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
					vq_callback_t *c, const char *n)
{
	vq_callback_t *callbacks[] = { c };
	const char *names[] = { n };
	struct virtqueue *vq;
	int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
					 NULL);
	if (err < 0)
		return ERR_PTR(err);
	return vq;
}

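/*
 * Illustrative sketch (not part of this header): probe path of a
 * single-queue driver.  "my_intr" and the queue name are hypothetical.
 *
 *	struct virtqueue *vq;
 *
 *	vq = virtio_find_single_vq(vdev, my_intr, "requests");
 *	if (IS_ERR(vq))
 *		return PTR_ERR(vq);
 */
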
static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		    struct virtqueue *vqs[], vq_callback_t *callbacks[],
		    const char * const names[],
		    struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
}

static inline
int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
				      desc);
}

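/*
 * Illustrative sketch (not part of this header): finding two queues, where
 * the second one is polled and therefore gets a NULL callback.  The callback
 * and queue names are hypothetical.
 *
 *	struct virtqueue *vqs[2];
 *	vq_callback_t *callbacks[] = { my_rx_intr, NULL };
 *	static const char * const names[] = { "rx", "ctrl" };
 *	int err;
 *
 *	err = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
 *	if (err)
 *		return err;
 */
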
/**
 * virtio_synchronize_cbs - synchronize with virtqueue callbacks
 * @dev: the device
 */
static inline
void virtio_synchronize_cbs(struct virtio_device *dev)
{
	if (dev->config->synchronize_cbs) {
		dev->config->synchronize_cbs(dev);
	} else {
		/*
		 * A best effort fallback to synchronize with
		 * interrupts, preemption and softirq disabled
		 * regions. See comment above synchronize_rcu().
		 */
		synchronize_rcu();
	}
}

/**
 * virtio_device_ready - enable vq use in probe function
 * @dev: the device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
	unsigned status = dev->config->get_status(dev);

	WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	/*
	 * The virtio_synchronize_cbs() makes sure vring_interrupt()
	 * will see the driver specific setup if it sees vq->broken
	 * as false (even if the notifications come before DRIVER_OK).
	 */
	virtio_synchronize_cbs(dev);
	__virtio_unbreak_device(dev);
#endif
	/*
	 * The transport should ensure the visibility of vq->broken
	 * before setting DRIVER_OK. See the comments for the transport
	 * specific set_status() method.
	 *
	 * A well behaved device will only notify a virtqueue after
	 * DRIVER_OK. By then it can observe the coherent memory write
	 * that cleared vq->broken, which the driver performed before
	 * setting DRIVER_OK, so the subsequent vring_interrupt() will
	 * see vq->broken as false and no notification is lost.
	 */
	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}

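/*
 * Illustrative sketch (not part of this header): a probe function that wants
 * to kick its virtqueues before returning must call virtio_device_ready()
 * first.  The callback, queue name and helper below are hypothetical.
 *
 *	static int my_probe(struct virtio_device *vdev)
 *	{
 *		struct virtqueue *vq;
 *
 *		vq = virtio_find_single_vq(vdev, my_intr, "events");
 *		if (IS_ERR(vq))
 *			return PTR_ERR(vq);
 *
 *		virtio_device_ready(vdev);	// vqs usable from here on
 *		my_fill_event_queue(vdev, vq);
 *		return 0;
 *	}
 */
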
static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
	if (!vdev->config->bus_name)
		return "virtio";
	return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be set
 * due to config support, irq type and sharing.
 *
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	if (vdev->config->set_vq_affinity)
		return vdev->config->set_vq_affinity(vq, cpu_mask);
	return 0;
}

static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
			   struct virtio_shm_region *region, u8 id)
{
	if (!vdev->config->get_shm_region)
		return false;
	return vdev->config->get_shm_region(vdev, region, id);
}

static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
		virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}

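/*
 * Illustrative sketch (not part of this header): these accessors convert
 * between CPU endianness and "virtio" endianness, which is little-endian for
 * modern (VIRTIO_F_VERSION_1) devices and guest-native for legacy ones.  The
 * struct and field below are hypothetical.
 *
 *	struct my_hdr { __virtio16 num_buffers; };
 *
 *	u16 n = virtio16_to_cpu(vdev, hdr->num_buffers);
 *	hdr->num_buffers = cpu_to_virtio16(vdev, n + 1);
 */
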
#define virtio_to_cpu(vdev, x) \
	_Generic((x), \
		__u8: (x), \
		__virtio16: virtio16_to_cpu((vdev), (x)), \
		__virtio32: virtio32_to_cpu((vdev), (x)), \
		__virtio64: virtio64_to_cpu((vdev), (x)) \
	)

#define cpu_to_virtio(vdev, x, m) \
	_Generic((m), \
		__u8: (x), \
		__virtio16: cpu_to_virtio16((vdev), (x)), \
		__virtio32: cpu_to_virtio32((vdev), (x)), \
		__virtio64: cpu_to_virtio64((vdev), (x)) \
	)

#define __virtio_native_type(structname, member) \
	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					    offsetof(structname, member), \
					    &virtio_cread_v,		\
					    1,				\
					    sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
	} while(0)

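/*
 * Illustrative sketch (not part of this header): reading one config space
 * field by struct layout.  struct virtio_blk_config and its "capacity"
 * member come from uapi/linux/virtio_blk.h; the local variable is just an
 * example.
 *
 *	u64 capacity;
 *
 *	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
 */
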
/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member), \
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)

/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x) \
	_Generic((x), \
		__u8: (u8)(x), \
		__le16: (u16)le16_to_cpu(x), \
		__le32: (u32)le32_to_cpu(x), \
		__le64: (u64)le64_to_cpu(x) \
	)

#define virtio_cpu_to_le(x, m) \
	_Generic((m), \
		__u8: (x), \
		__le16: cpu_to_le16(x), \
		__le32: cpu_to_le32(x), \
		__le64: cpu_to_le64(x) \
	)

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					    offsetof(structname, member), \
					    &virtio_cread_v,		\
					    1,				\
					    sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
	} while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member), \
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)


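/*
 * Illustrative sketch (not part of this header): the _le variants are for
 * config structures whose fields are declared __le16/__le32/__le64, i.e.
 * modern, little-endian-only layouts.  The struct and field below are
 * hypothetical.
 *
 *	struct my_cfg { __le32 queue_depth; };
 *	u32 depth;
 *
 *	virtio_cread_le(vdev, struct my_cfg, queue_depth, &depth);
 *	virtio_cwrite_le(vdev, struct my_cfg, queue_depth, &depth);
 */
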
/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
				       unsigned int offset,
				       void *buf, size_t count, size_t bytes)
{
	u32 old, gen = vdev->config->generation ?
		vdev->config->generation(vdev) : 0;
	int i;

	might_sleep();
	do {
		old = gen;

		for (i = 0; i < count; i++)
			vdev->config->get(vdev, offset + bytes * i,
					  buf + i * bytes, bytes);

		gen = vdev->config->generation ?
			vdev->config->generation(vdev) : 0;
	} while (gen != old);
}

static inline void virtio_cread_bytes(struct virtio_device *vdev,
				      unsigned int offset,
				      void *buf, size_t len)
{
	__virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
	u8 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
				  unsigned int offset, u8 val)
{
	might_sleep();
	vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio16 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
				   unsigned int offset, u16 val)
{
	__virtio16 v;

	might_sleep();
	v = cpu_to_virtio16(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio32 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
				   unsigned int offset, u32 val)
{
	__virtio32 v;

	might_sleep();
	v = cpu_to_virtio32(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio64 ret;

	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
	return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
				   unsigned int offset, u64 val)
{
	__virtio64 v;

	might_sleep();
	v = cpu_to_virtio64(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread((vdev), structname, member, ptr);	\
		_r;							\
	})

/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread_le((vdev), structname, member, ptr); \
		_r;							\
	})

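/*
 * Illustrative sketch (not part of this header): reading a field only when
 * the feature guarding it was negotiated.  struct virtio_blk_config and
 * VIRTIO_BLK_F_BLK_SIZE come from uapi/linux/virtio_blk.h; the local
 * variable and fallback value are just an example.
 *
 *	u32 blk_size;
 *
 *	if (virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
 *				 struct virtio_blk_config, blk_size,
 *				 &blk_size))
 *		blk_size = 512;		// feature absent, use a default
 */
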
#endif /* _LINUX_VIRTIO_CONFIG_H */