/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

struct virtio_shm_region {
	u64 addr;
	u64 len;
};

/**
 * virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements all of the operations,
 *       or that getting/setting a value is a simple read/write! Generally
 *       speaking, any of @get/@set, @get_status/@set_status, or
 *       @get_features/@finalize_features are NOT safe to be called from
 *       an atomic context.
 * @get: read the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to write the field value into.
 *	len: the length of the buffer
 * @set: write the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to read the field value from.
 *	len: the length of the buffer
 * @generation: config generation counter (optional)
 *	vdev: the virtio_device
 *	Returns the config generation counter
 * @get_status: read the status byte
 *	vdev: the virtio_device
 *	Returns the status byte
 * @set_status: write the status byte
 *	vdev: the virtio_device
 *	status: the new status byte
 * @reset: reset the device
 *	vdev: the virtio device
 *	After this, status and feature negotiation must be done again
 *	Device must not be reset from its vq/config callbacks, or in
 *	parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *	vdev: the virtio_device
 *	nvqs: the number of virtqueues to find
 *	vqs: on success, includes new virtqueues
 *	callbacks: array of callbacks, for each virtqueue
 *		include a NULL entry for vqs that do not need a callback
 *	names: array of virtqueue names (mainly for debugging)
 *		include a NULL entry for vqs unused by driver
 *	Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
 *      The function guarantees that all memory operations on the
 *      queue before it are visible to the vring_interrupt() that is
 *      called after it.
 *      vdev: the virtio_device
 * @get_features: get the array of feature bits for this device.
 *	vdev: the virtio_device
 *	Returns the first 64 feature bits (all we currently need).
 * @finalize_features: confirm what device features we'll be using.
 *	vdev: the virtio_device
 *	This sends the driver feature bits to the device: it can change
 *	the dev->features bits if it wants.
 * Note: despite the name this can be called any number of times.
 *	Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *	vdev: the virtio_device
 *      This returns a pointer to the bus name a la pci_name from which
 *      the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 * @disable_vq_and_reset: reset a queue individually (optional).
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	disable_vq_and_reset guarantees that the callbacks are disabled and
 *	synchronized.
 *	Except in the callback, the caller should ensure that the vring is
 *	not accessed by any virtqueue functions.
 * @enable_vq_after_reset: enable a previously reset queue
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	If disable_vq_and_reset is set, then enable_vq_after_reset must also be
 *	set.
 */
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
	void (*get)(struct virtio_device *vdev, unsigned offset,
		    void *buf, unsigned len);
	void (*set)(struct virtio_device *vdev, unsigned offset,
		    const void *buf, unsigned len);
	u32 (*generation)(struct virtio_device *vdev);
	u8 (*get_status)(struct virtio_device *vdev);
	void (*set_status)(struct virtio_device *vdev, u8 status);
	void (*reset)(struct virtio_device *vdev);
	int (*find_vqs)(struct virtio_device *, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc);
	void (*del_vqs)(struct virtio_device *);
	void (*synchronize_cbs)(struct virtio_device *);
	u64 (*get_features)(struct virtio_device *vdev);
	int (*finalize_features)(struct virtio_device *vdev);
	const char *(*bus_name)(struct virtio_device *vdev);
	int (*set_vq_affinity)(struct virtqueue *vq,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
			int index);
	bool (*get_shm_region)(struct virtio_device *vdev,
			       struct virtio_shm_region *region, u8 id);
	int (*disable_vq_and_reset)(struct virtqueue *vq);
	int (*enable_vq_after_reset)(struct virtqueue *vq);
};
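
/*
 * Illustrative sketch, not part of this header's API: a transport
 * typically provides a static instance of these ops and attaches it to
 * its struct virtio_device before registering the device. All foo_*
 * names below are hypothetical; only the mandatory-ish ops are shown.
 *
 *	static const struct virtio_config_ops virtio_foo_config_ops = {
 *		.get			= foo_get,
 *		.set			= foo_set,
 *		.get_status		= foo_get_status,
 *		.set_status		= foo_set_status,
 *		.reset			= foo_reset,
 *		.find_vqs		= foo_find_vqs,
 *		.del_vqs		= foo_del_vqs,
 *		.get_features		= foo_get_features,
 *		.finalize_features	= foo_finalize_features,
 *	};
 *
 *	vfoo->vdev.config = &virtio_foo_config_ops;
 *	register_virtio_device(&vfoo->vdev);
 */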

/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *                     Devices should normally use virtio_has_feature,
 *                     which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
				     unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	return vdev->features & BIT_ULL(fbit);
}
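
/*
 * Illustrative sketch (hypothetical transport code), not part of the API:
 * a transport may consult an already-negotiated transport feature bit
 * directly, skipping the driver-side validation done by
 * virtio_has_feature().
 *
 *	static bool foo_device_is_modern(struct virtio_device *vdev)
 *	{
 *		return __virtio_test_bit(vdev, VIRTIO_F_VERSION_1);
 *	}
 */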

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
				    unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features |= BIT_ULL(fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
				      unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features &= ~BIT_ULL(fbit);
}

/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
				      unsigned int fbit)
{
	if (fbit < VIRTIO_TRANSPORT_F_START)
		virtio_check_driver_offered_feature(vdev, fbit);

	return __virtio_test_bit(vdev, fbit);
}
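
/*
 * Illustrative sketch, not part of the API: a driver tests device
 * feature bits (below VIRTIO_TRANSPORT_F_START) with
 * virtio_has_feature() after negotiation, e.g. in probe; the driver
 * must also list the bit in its feature table. VIRTIO_BLK_F_RO comes
 * from uapi/linux/virtio_blk.h and is only an example; foo_blk_probe
 * is hypothetical.
 *
 *	static int foo_blk_probe(struct virtio_device *vdev)
 *	{
 *		if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
 *			dev_info(&vdev->dev, "device is read-only\n");
 *		return 0;
 *	}
 */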

/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
	/*
	 * Note the reverse polarity of the quirk feature (compared to most
	 * other features); this is for compatibility with legacy systems.
	 */
	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
					vq_callback_t *c, const char *n)
{
	vq_callback_t *callbacks[] = { c };
	const char *names[] = { n };
	struct virtqueue *vq;
	int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
					 NULL);
	if (err < 0)
		return ERR_PTR(err);
	return vq;
}
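
/*
 * Illustrative sketch, not part of the API: a single-queue driver can
 * set up its only virtqueue with virtio_find_single_vq(). The foo_*
 * names and the "requests" queue name are hypothetical.
 *
 *	static int foo_probe(struct virtio_device *vdev)
 *	{
 *		struct virtqueue *vq;
 *
 *		vq = virtio_find_single_vq(vdev, foo_done, "requests");
 *		if (IS_ERR(vq))
 *			return PTR_ERR(vq);
 *		return 0;
 *	}
 */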

static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[],
			struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
}

static inline
int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
				      desc);
}
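
/*
 * Illustrative sketch, not part of the API: requesting a pair of
 * virtqueues (e.g. one rx, one tx) with per-queue callbacks and names.
 * All foo_* identifiers are hypothetical.
 *
 *	static int foo_init_vqs(struct virtio_device *vdev,
 *				struct virtqueue *vqs[2])
 *	{
 *		vq_callback_t *callbacks[] = { foo_rx_done, foo_tx_done };
 *		static const char * const names[] = { "rx", "tx" };
 *
 *		return virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
 *	}
 */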

/**
 * virtio_synchronize_cbs - synchronize with virtqueue callbacks
 * @dev: the virtio device
 */
static inline
void virtio_synchronize_cbs(struct virtio_device *dev)
{
	if (dev->config->synchronize_cbs) {
		dev->config->synchronize_cbs(dev);
	} else {
		/*
		 * A best effort fallback to synchronize with
		 * interrupts, preemption and softirq disabled
		 * regions. See comment above synchronize_rcu().
		 */
		synchronize_rcu();
	}
}

/**
 * virtio_device_ready - enable vq use in probe function
 * @dev: the virtio device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
	unsigned status = dev->config->get_status(dev);

	WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	/*
	 * The virtio_synchronize_cbs() makes sure vring_interrupt()
	 * will see the driver specific setup if it sees vq->broken
	 * as false (even if the notifications come before DRIVER_OK).
	 */
	virtio_synchronize_cbs(dev);
	__virtio_unbreak_device(dev);
#endif
	/*
	 * The transport should ensure the visibility of vq->broken
	 * before setting DRIVER_OK. See the comments for the transport
	 * specific set_status() method.
	 *
	 * A well behaved device will only notify a virtqueue after
	 * DRIVER_OK; this means the device should "see" the coherent
	 * memory write that set vq->broken to false, which is done by
	 * the driver when it sees DRIVER_OK. The driver's subsequent
	 * vring_interrupt() will then see vq->broken as false, so no
	 * notification is lost.
	 */
	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
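
/*
 * Illustrative sketch, not part of the API: a driver that must use its
 * virtqueues from within probe (e.g. to pre-fill receive buffers) calls
 * virtio_device_ready() first. The foo_* identifiers are hypothetical.
 *
 *	static int foo_probe(struct virtio_device *vdev)
 *	{
 *		struct virtqueue *vq;
 *
 *		vq = virtio_find_single_vq(vdev, foo_recv_done, "rx");
 *		if (IS_ERR(vq))
 *			return PTR_ERR(vq);
 *
 *		virtio_device_ready(vdev);
 *		foo_fill_rx(vq);
 *		return 0;
 *	}
 */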

static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
	if (!vdev->config->bus_name)
		return "virtio";
	return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be
 * set depending on config support, IRQ type and IRQ sharing.
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	if (vdev->config->set_vq_affinity)
		return vdev->config->set_vq_affinity(vq, cpu_mask);
	return 0;
}
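
/*
 * Illustrative sketch, not part of the API: spreading per-CPU virtqueues
 * across online CPUs, ignoring the (best-effort) return value. All
 * foo_* identifiers are hypothetical.
 *
 *	static void foo_set_affinity(struct foo_dev *fdev)
 *	{
 *		int cpu;
 *		int i = 0;
 *
 *		for_each_online_cpu(cpu)
 *			virtqueue_set_affinity(fdev->vqs[i++ % fdev->nvqs],
 *					       cpumask_of(cpu));
 *	}
 */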

static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
			   struct virtio_shm_region *region, u8 id)
{
	if (!vdev->config->get_shm_region)
		return false;
	return vdev->config->get_shm_region(vdev, region, id);
}
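
/*
 * Illustrative sketch, not part of the API: looking up a shared memory
 * region by id. FOO_SHM_ID_CACHE is a hypothetical, device-specific
 * region id.
 *
 *	struct virtio_shm_region shm;
 *
 *	if (!virtio_get_shm_region(vdev, &shm, FOO_SHM_ID_CACHE))
 *		return -ENXIO;
 *	dev_dbg(&vdev->dev, "shm region at %llx, len %llu\n",
 *		shm.addr, shm.len);
 */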

static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
		virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
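
/*
 * Illustrative sketch, not part of the API: converting a device-endian
 * field from a hypothetical request header before using it on the CPU.
 * struct foo_req_hdr and foo_req_offset() are made up for the example.
 *
 *	struct foo_req_hdr {
 *		__virtio32 type;
 *		__virtio64 offset;
 *	};
 *
 *	static u64 foo_req_offset(struct virtio_device *vdev,
 *				  const struct foo_req_hdr *hdr)
 *	{
 *		return virtio64_to_cpu(vdev, hdr->offset);
 *	}
 */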

#define virtio_to_cpu(vdev, x) \
	_Generic((x), \
		__u8: (x), \
		__virtio16: virtio16_to_cpu((vdev), (x)), \
		__virtio32: virtio32_to_cpu((vdev), (x)), \
		__virtio64: virtio64_to_cpu((vdev), (x)) \
		)

#define cpu_to_virtio(vdev, x, m) \
	_Generic((m), \
		__u8: (x), \
		__virtio16: cpu_to_virtio16((vdev), (x)), \
		__virtio32: cpu_to_virtio32((vdev), (x)), \
		__virtio64: cpu_to_virtio64((vdev), (x)) \
		)

#define __virtio_native_type(structname, member) \
	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  1,				\
					  sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
	} while(0)

/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
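
/*
 * Illustrative sketch, not part of the API: reading and writing fields
 * of a hypothetical device config layout with the accessors above.
 * struct virtio_foo_config and its members are made up for the example.
 *
 *	struct virtio_foo_config {
 *		__virtio32 block_size;
 *		__virtio64 capacity;
 *	};
 *
 *	u32 bs;
 *	u64 cap;
 *
 *	virtio_cread(vdev, struct virtio_foo_config, block_size, &bs);
 *	virtio_cread(vdev, struct virtio_foo_config, capacity, &cap);
 *	virtio_cwrite(vdev, struct virtio_foo_config, block_size, &bs);
 */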

/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x) \
	_Generic((x), \
		__u8: (u8)(x), \
		 __le16: (u16)le16_to_cpu(x), \
		 __le32: (u32)le32_to_cpu(x), \
		 __le64: (u64)le64_to_cpu(x) \
		)

#define virtio_cpu_to_le(x, m) \
	_Generic((m), \
		 __u8: (x), \
		 __le16: cpu_to_le16(x), \
		 __le32: cpu_to_le32(x), \
		 __le64: cpu_to_le64(x) \
		)

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  1,				\
					  sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
	} while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
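
/*
 * Illustrative sketch, not part of the API: the _le variants are for
 * config fields that are always little-endian (e.g. fields that only
 * exist on modern, non-legacy devices). struct virtio_foo_config_le and
 * its member are hypothetical.
 *
 *	struct virtio_foo_config_le {
 *		__le32 queue_limit;
 *	};
 *
 *	u32 limit;
 *
 *	virtio_cread_le(vdev, struct virtio_foo_config_le, queue_limit,
 *			&limit);
 */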


/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
				       unsigned int offset,
				       void *buf, size_t count, size_t bytes)
{
	u32 old, gen = vdev->config->generation ?
		vdev->config->generation(vdev) : 0;
	int i;

	might_sleep();
	do {
		old = gen;

		for (i = 0; i < count; i++)
			vdev->config->get(vdev, offset + bytes * i,
					  buf + i * bytes, bytes);

		gen = vdev->config->generation ?
			vdev->config->generation(vdev) : 0;
	} while (gen != old);
}

static inline void virtio_cread_bytes(struct virtio_device *vdev,
				      unsigned int offset,
				      void *buf, size_t len)
{
	__virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
	u8 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
				  unsigned int offset, u8 val)
{
	might_sleep();
	vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio16 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
				   unsigned int offset, u16 val)
{
	__virtio16 v;

	might_sleep();
	v = cpu_to_virtio16(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio32 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
				   unsigned int offset, u32 val)
{
	__virtio32 v;

	might_sleep();
	v = cpu_to_virtio32(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio64 ret;

	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
	return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
				   unsigned int offset, u64 val)
{
	__virtio64 v;

	might_sleep();
	v = cpu_to_virtio64(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread((vdev), structname, member, ptr);	\
		_r;							\
	})

/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread_le((vdev), structname, member, ptr); \
		_r;							\
	})
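
/*
 * Illustrative sketch, not part of the API: reading a config field only
 * when the feature that guards it was negotiated. VIRTIO_BLK_F_BLK_SIZE
 * and struct virtio_blk_config come from uapi/linux/virtio_blk.h; the
 * surrounding code is hypothetical.
 *
 *	u32 blk_size;
 *	int err;
 *
 *	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
 *				   struct virtio_blk_config, blk_size,
 *				   &blk_size);
 *	if (err)
 *		blk_size = 512;
 */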

#endif /* _LINUX_VIRTIO_CONFIG_H */