xref: /linux-6.15/include/linux/virtio_config.h (revision b49503ea)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_VIRTIO_CONFIG_H
3 #define _LINUX_VIRTIO_CONFIG_H
4 
5 #include <linux/err.h>
6 #include <linux/bug.h>
7 #include <linux/virtio.h>
8 #include <linux/virtio_byteorder.h>
9 #include <linux/compiler_types.h>
10 #include <uapi/linux/virtio_config.h>
11 
12 struct irq_affinity;
13 
/**
 * struct virtio_shm_region - a shared memory region exposed by a device
 * @addr: start address of the region (address space is transport-defined)
 * @len: length of the region in bytes
 */
struct virtio_shm_region {
	u64 addr;
	u64 len;
};
18 
19 typedef void vq_callback_t(struct virtqueue *);
20 
21 /**
22  * struct virtqueue_info - Info for a virtqueue passed to find_vqs().
23  * @name: virtqueue description. Used mainly for debugging, NULL for
24  *        a virtqueue unused by the driver.
25  * @callback: A callback to invoke on a used buffer notification.
26  *            NULL for a virtqueue that does not need a callback.
27  * @ctx: A flag to indicate to maintain an extra context per virtqueue.
28  */
29 struct virtqueue_info {
30 	const char *name;
31 	vq_callback_t *callback;
32 	bool ctx;
33 };
34 
35 /**
36  * struct virtio_config_ops - operations for configuring a virtio device
37  * Note: Do not assume that a transport implements all of the operations
38  *       getting/setting a value as a simple read/write! Generally speaking,
39  *       any of @get/@set, @get_status/@set_status, or @get_features/
40  *       @finalize_features are NOT safe to be called from an atomic
41  *       context.
42  * @get: read the value of a configuration field
43  *	vdev: the virtio_device
44  *	offset: the offset of the configuration field
45  *	buf: the buffer to write the field value into.
46  *	len: the length of the buffer
47  * @set: write the value of a configuration field
48  *	vdev: the virtio_device
49  *	offset: the offset of the configuration field
50  *	buf: the buffer to read the field value from.
51  *	len: the length of the buffer
52  * @generation: config generation counter (optional)
53  *	vdev: the virtio_device
54  *	Returns the config generation counter
55  * @get_status: read the status byte
56  *	vdev: the virtio_device
57  *	Returns the status byte
58  * @set_status: write the status byte
59  *	vdev: the virtio_device
60  *	status: the new status byte
61  * @reset: reset the device
62  *	vdev: the virtio device
63  *	After this, status and feature negotiation must be done again
64  *	Device must not be reset from its vq/config callbacks, or in
65  *	parallel with being added/removed.
66  * @find_vqs: find virtqueues and instantiate them.
67  *	vdev: the virtio_device
68  *	nvqs: the number of virtqueues to find
69  *	vqs: on success, includes new virtqueues
70  *	vqs_info: array of virtqueue info structures
71  *	Returns 0 on success or error status
72  * @del_vqs: free virtqueues found by find_vqs().
73  * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
74  *      The function guarantees that all memory operations on the
75  *      queue before it are visible to the vring_interrupt() that is
76  *      called after it.
77  *      vdev: the virtio_device
78  * @get_features: get the array of feature bits for this device.
79  *	vdev: the virtio_device
80  *	Returns the first 64 feature bits (all we currently need).
81  * @finalize_features: confirm what device features we'll be using.
82  *	vdev: the virtio_device
83  *	This sends the driver feature bits to the device: it can change
84  *	the dev->feature bits if it wants.
85  *	Note that despite the name this	can be called any number of
86  *	times.
87  *	Returns 0 on success or error status
88  * @bus_name: return the bus name associated with the device (optional)
89  *	vdev: the virtio_device
90  *      This returns a pointer to the bus name a la pci_name from which
91  *      the caller can then copy.
92  * @set_vq_affinity: set the affinity for a virtqueue (optional).
93  * @get_vq_affinity: get the affinity for a virtqueue (optional).
94  * @get_shm_region: get a shared memory region based on the index.
95  * @disable_vq_and_reset: reset a queue individually (optional).
96  *	vq: the virtqueue
97  *	Returns 0 on success or error status
98  *	disable_vq_and_reset will guarantee that the callbacks are disabled and
99  *	synchronized.
100  *	Except for the callback, the caller should guarantee that the vring is
101  *	not accessed by any functions of virtqueue.
102  * @enable_vq_after_reset: enable a reset queue
103  *	vq: the virtqueue
104  *	Returns 0 on success or error status
105  *	If disable_vq_and_reset is set, then enable_vq_after_reset must also be
106  *	set.
107  * @create_avq: create admin virtqueue resource.
108  * @destroy_avq: destroy admin virtqueue resource.
109  */
110 struct virtio_config_ops {
111 	void (*get)(struct virtio_device *vdev, unsigned offset,
112 		    void *buf, unsigned len);
113 	void (*set)(struct virtio_device *vdev, unsigned offset,
114 		    const void *buf, unsigned len);
115 	u32 (*generation)(struct virtio_device *vdev);
116 	u8 (*get_status)(struct virtio_device *vdev);
117 	void (*set_status)(struct virtio_device *vdev, u8 status);
118 	void (*reset)(struct virtio_device *vdev);
119 	int (*find_vqs)(struct virtio_device *vdev, unsigned int nvqs,
120 			struct virtqueue *vqs[],
121 			struct virtqueue_info vqs_info[],
122 			struct irq_affinity *desc);
123 	void (*del_vqs)(struct virtio_device *);
124 	void (*synchronize_cbs)(struct virtio_device *);
125 	u64 (*get_features)(struct virtio_device *vdev);
126 	int (*finalize_features)(struct virtio_device *vdev);
127 	const char *(*bus_name)(struct virtio_device *vdev);
128 	int (*set_vq_affinity)(struct virtqueue *vq,
129 			       const struct cpumask *cpu_mask);
130 	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
131 						 int index);
132 	bool (*get_shm_region)(struct virtio_device *vdev,
133 			       struct virtio_shm_region *region, u8 id);
134 	int (*disable_vq_and_reset)(struct virtqueue *vq);
135 	int (*enable_vq_after_reset)(struct virtqueue *vq);
136 	int (*create_avq)(struct virtio_device *vdev);
137 	void (*destroy_avq)(struct virtio_device *vdev);
138 };
139 
/*
 * If driver didn't advertise the feature, it will never appear.
 * Called (indirectly, via virtio_has_feature()) to sanity-check that a
 * driver only queries feature bits it declared.
 */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit);
143 
144 /**
145  * __virtio_test_bit - helper to test feature bits. For use by transports.
146  *                     Devices should normally use virtio_has_feature,
147  *                     which includes more checks.
148  * @vdev: the device
149  * @fbit: the feature bit
150  */
151 static inline bool __virtio_test_bit(const struct virtio_device *vdev,
152 				     unsigned int fbit)
153 {
154 	/* Did you forget to fix assumptions on max features? */
155 	if (__builtin_constant_p(fbit))
156 		BUILD_BUG_ON(fbit >= 64);
157 	else
158 		BUG_ON(fbit >= 64);
159 
160 	return vdev->features & BIT_ULL(fbit);
161 }
162 
163 /**
164  * __virtio_set_bit - helper to set feature bits. For use by transports.
165  * @vdev: the device
166  * @fbit: the feature bit
167  */
168 static inline void __virtio_set_bit(struct virtio_device *vdev,
169 				    unsigned int fbit)
170 {
171 	/* Did you forget to fix assumptions on max features? */
172 	if (__builtin_constant_p(fbit))
173 		BUILD_BUG_ON(fbit >= 64);
174 	else
175 		BUG_ON(fbit >= 64);
176 
177 	vdev->features |= BIT_ULL(fbit);
178 }
179 
180 /**
181  * __virtio_clear_bit - helper to clear feature bits. For use by transports.
182  * @vdev: the device
183  * @fbit: the feature bit
184  */
185 static inline void __virtio_clear_bit(struct virtio_device *vdev,
186 				      unsigned int fbit)
187 {
188 	/* Did you forget to fix assumptions on max features? */
189 	if (__builtin_constant_p(fbit))
190 		BUILD_BUG_ON(fbit >= 64);
191 	else
192 		BUG_ON(fbit >= 64);
193 
194 	vdev->features &= ~BIT_ULL(fbit);
195 }
196 
197 /**
198  * virtio_has_feature - helper to determine if this device has this feature.
199  * @vdev: the device
200  * @fbit: the feature bit
201  */
202 static inline bool virtio_has_feature(const struct virtio_device *vdev,
203 				      unsigned int fbit)
204 {
205 	if (fbit < VIRTIO_TRANSPORT_F_START)
206 		virtio_check_driver_offered_feature(vdev, fbit);
207 
208 	return __virtio_test_bit(vdev, fbit);
209 }
210 
211 /**
212  * virtio_has_dma_quirk - determine whether this device has the DMA quirk
213  * @vdev: the device
214  */
215 static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
216 {
217 	/*
218 	 * Note the reverse polarity of the quirk feature (compared to most
219 	 * other features), this is for compatibility with legacy systems.
220 	 */
221 	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
222 }
223 
/**
 * virtio_find_vqs_info - find virtqueues described by an info array
 * @vdev: the virtio device
 * @nvqs: the number of virtqueues to find
 * @vqs: on success, includes new virtqueues
 * @vqs_info: array of virtqueue info structures
 * @desc: irq affinity descriptor, may be NULL
 *
 * Thin wrapper around the transport's find_vqs() operation.
 *
 * Return: 0 on success or error status.
 */
static inline
int virtio_find_vqs_info(struct virtio_device *vdev, unsigned int nvqs,
			 struct virtqueue *vqs[],
			 struct virtqueue_info vqs_info[],
			 struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, vqs_info, desc);
}
232 
233 static inline
234 int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
235 			struct virtqueue *vqs[], vq_callback_t *callbacks[],
236 			const char * const names[], const bool *ctx,
237 			struct irq_affinity *desc)
238 {
239 	struct virtqueue_info *vqs_info;
240 	int err, i;
241 
242 	vqs_info = kmalloc_array(nvqs, sizeof(*vqs_info), GFP_KERNEL);
243 	if (!vqs_info)
244 		return -ENOMEM;
245 	for (i = 0; i < nvqs; i++) {
246 		vqs_info[i].name = names[i];
247 		vqs_info[i].callback = callbacks[i];
248 		vqs_info[i].ctx = ctx ? ctx[i] : false;
249 	}
250 	err = virtio_find_vqs_info(vdev, nvqs, vqs, vqs_info, desc);
251 	kfree(vqs_info);
252 	return err;
253 }
254 
/*
 * virtio_find_vqs - like virtio_find_vqs_ctx(), but with no per-vq
 * context flags (all default to false).
 */
static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[],
			struct irq_affinity *desc)
{
	return virtio_find_vqs_ctx(vdev, nvqs, vqs, callbacks,
				   names, NULL, desc);
}
264 
/**
 * virtio_find_single_vq - find a device's single virtqueue
 * @vdev: the virtio device
 * @c: callback for the virtqueue, may be NULL
 * @n: name of the virtqueue
 *
 * Return: the virtqueue on success, or an ERR_PTR-encoded error.
 */
static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
					vq_callback_t *c, const char *n)
{
	struct virtqueue_info vqs_info[] = {
		{ n, c },	/* .ctx is zero-initialized (false) */
	};
	struct virtqueue *vq;
	int err = virtio_find_vqs_info(vdev, 1, &vq, vqs_info, NULL);

	if (err < 0)
		return ERR_PTR(err);
	return vq;
}
279 
280 /**
281  * virtio_synchronize_cbs - synchronize with virtqueue callbacks
282  * @dev: the virtio device
283  */
284 static inline
285 void virtio_synchronize_cbs(struct virtio_device *dev)
286 {
287 	if (dev->config->synchronize_cbs) {
288 		dev->config->synchronize_cbs(dev);
289 	} else {
290 		/*
291 		 * A best effort fallback to synchronize with
292 		 * interrupts, preemption and softirq disabled
293 		 * regions. See comment above synchronize_rcu().
294 		 */
295 		synchronize_rcu();
296 	}
297 }
298 
299 /**
300  * virtio_device_ready - enable vq use in probe function
301  * @dev: the virtio device
302  *
303  * Driver must call this to use vqs in the probe function.
304  *
305  * Note: vqs are enabled automatically after probe returns.
306  */
307 static inline
308 void virtio_device_ready(struct virtio_device *dev)
309 {
310 	unsigned status = dev->config->get_status(dev);
311 
312 	WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
313 
314 #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
315 	/*
316 	 * The virtio_synchronize_cbs() makes sure vring_interrupt()
317 	 * will see the driver specific setup if it sees vq->broken
318 	 * as false (even if the notifications come before DRIVER_OK).
319 	 */
320 	virtio_synchronize_cbs(dev);
321 	__virtio_unbreak_device(dev);
322 #endif
323 	/*
324 	 * The transport should ensure the visibility of vq->broken
325 	 * before setting DRIVER_OK. See the comments for the transport
326 	 * specific set_status() method.
327 	 *
328 	 * A well behaved device will only notify a virtqueue after
329 	 * DRIVER_OK, this means the device should "see" the coherenct
330 	 * memory write that set vq->broken as false which is done by
331 	 * the driver when it sees DRIVER_OK, then the following
332 	 * driver's vring_interrupt() will see vq->broken as false so
333 	 * we won't lose any notification.
334 	 */
335 	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
336 }
337 
338 static inline
339 const char *virtio_bus_name(struct virtio_device *vdev)
340 {
341 	if (!vdev->config->bus_name)
342 		return "virtio";
343 	return vdev->config->bus_name(vdev);
344 }
345 
346 /**
347  * virtqueue_set_affinity - setting affinity for a virtqueue
348  * @vq: the virtqueue
349  * @cpu_mask: the cpu mask
350  *
351  * Pay attention the function are best-effort: the affinity hint may not be set
352  * due to config support, irq type and sharing.
353  *
354  */
355 static inline
356 int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
357 {
358 	struct virtio_device *vdev = vq->vdev;
359 	if (vdev->config->set_vq_affinity)
360 		return vdev->config->set_vq_affinity(vq, cpu_mask);
361 	return 0;
362 }
363 
/**
 * virtio_get_shm_region - look up a device shared memory region
 * @vdev: the virtio device
 * @region: filled in on success
 * @id: index of the region to look up
 *
 * Return: false when the transport lacks the optional get_shm_region
 * op, otherwise the op's result.
 */
static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
			   struct virtio_shm_region *region, u8 id)
{
	if (!vdev->config->get_shm_region)
		return false;
	return vdev->config->get_shm_region(vdev, region, id);
}
372 
373 static inline bool virtio_is_little_endian(struct virtio_device *vdev)
374 {
375 	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
376 		virtio_legacy_is_little_endian();
377 }
378 
/*
 * Memory accessors: convert values between the device byte order
 * (chosen by virtio_is_little_endian()) and CPU byte order.
 */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
409 
/*
 * Type-generic virtio-to-CPU conversion: dispatches on the type of (x)
 * to the sized helper above; __u8 needs no byte swapping.
 */
#define virtio_to_cpu(vdev, x) \
	_Generic((x), \
		__u8: (x), \
		__virtio16: virtio16_to_cpu((vdev), (x)), \
		__virtio32: virtio32_to_cpu((vdev), (x)), \
		__virtio64: virtio64_to_cpu((vdev), (x)) \
		)

/*
 * Type-generic CPU-to-virtio conversion: (m) selects the target type,
 * (x) is the value converted.
 */
#define cpu_to_virtio(vdev, x, m) \
	_Generic((m), \
		__u8: (x), \
		__virtio16: cpu_to_virtio16((vdev), (x)), \
		__virtio32: cpu_to_virtio32((vdev), (x)), \
		__virtio64: cpu_to_virtio64((vdev), (x)) \
		)

/* Native (CPU byte order) type corresponding to a config struct member. */
#define __virtio_native_type(structname, member) \
	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))
428 
/*
 * Config space read accessor: read @member of @structname from @vdev's
 * config space into *@ptr, converting from device to CPU byte order.
 * 1/2/4-byte fields use a single ->get(); larger fields go through
 * __virtio_cread_many() so the read is retried if the config
 * generation changes mid-read.
 */
#define virtio_cread(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev), 			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev), 			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  1,				\
					  sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
	} while(0)
457 
/*
 * Config space write accessor: convert *@ptr to device byte order and
 * write it to @member of @structname in @vdev's config space.
 */
#define virtio_cwrite(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
472 
473 /*
474  * Nothing virtio-specific about these, but let's worry about generalizing
475  * these later.
476  */
477 #define virtio_le_to_cpu(x) \
478 	_Generic((x), \
479 		__u8: (u8)(x), \
480 		 __le16: (u16)le16_to_cpu(x), \
481 		 __le32: (u32)le32_to_cpu(x), \
482 		 __le64: (u64)le64_to_cpu(x) \
483 		)
484 
485 #define virtio_cpu_to_le(x, m) \
486 	_Generic((m), \
487 		 __u8: (x), \
488 		 __le16: cpu_to_le16(x), \
489 		 __le32: cpu_to_le32(x), \
490 		 __le64: cpu_to_le64(x) \
491 		)
492 
/*
 * LE (i.e. modern, virtio 1.0+) config space read accessor: like
 * virtio_cread(), but the field is always little-endian regardless of
 * feature negotiation.
 */
#define virtio_cread_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev), 			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev), 			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  1,				\
					  sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
	} while(0)
521 
/*
 * LE (modern) config space write accessor: little-endian counterpart
 * of virtio_cwrite().
 */
#define virtio_cwrite_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
535 
536 
537 /* Read @count fields, @bytes each. */
538 static inline void __virtio_cread_many(struct virtio_device *vdev,
539 				       unsigned int offset,
540 				       void *buf, size_t count, size_t bytes)
541 {
542 	u32 old, gen = vdev->config->generation ?
543 		vdev->config->generation(vdev) : 0;
544 	int i;
545 
546 	might_sleep();
547 	do {
548 		old = gen;
549 
550 		for (i = 0; i < count; i++)
551 			vdev->config->get(vdev, offset + bytes * i,
552 					  buf + i * bytes, bytes);
553 
554 		gen = vdev->config->generation ?
555 			vdev->config->generation(vdev) : 0;
556 	} while (gen != old);
557 }
558 
/*
 * virtio_cread_bytes - read a raw byte range from config space.
 * Goes byte-by-byte through __virtio_cread_many(), so the range is
 * re-read if the config generation changes while reading.
 */
static inline void virtio_cread_bytes(struct virtio_device *vdev,
				      unsigned int offset,
				      void *buf, size_t len)
{
	__virtio_cread_many(vdev, offset, buf, len, 1);
}
565 
/* Read an 8-bit config field; no byte-order conversion needed. */
static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
	u8 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return ret;
}

/* Write an 8-bit config field; no byte-order conversion needed. */
static inline void virtio_cwrite8(struct virtio_device *vdev,
				  unsigned int offset, u8 val)
{
	might_sleep();
	vdev->config->set(vdev, offset, &val, sizeof(val));
}
581 
/* Read a 16-bit config field, converting from device to CPU byte order. */
static inline u16 virtio_cread16(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio16 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio16_to_cpu(vdev, ret);
}

/* Write a 16-bit config field, converting from CPU to device byte order. */
static inline void virtio_cwrite16(struct virtio_device *vdev,
				   unsigned int offset, u16 val)
{
	__virtio16 v;

	might_sleep();
	v = cpu_to_virtio16(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}
601 
/* Read a 32-bit config field, converting from device to CPU byte order. */
static inline u32 virtio_cread32(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio32 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio32_to_cpu(vdev, ret);
}

/* Write a 32-bit config field, converting from CPU to device byte order. */
static inline void virtio_cwrite32(struct virtio_device *vdev,
				   unsigned int offset, u32 val)
{
	__virtio32 v;

	might_sleep();
	v = cpu_to_virtio32(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}
621 
/*
 * Read a 64-bit config field via __virtio_cread_many() so the two
 * halves are consistent under config generation changes; that helper
 * also supplies the might_sleep() check.
 */
static inline u64 virtio_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio64 ret;

	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
	return virtio64_to_cpu(vdev, ret);
}

/* Write a 64-bit config field, converting from CPU to device byte order. */
static inline void virtio_cwrite64(struct virtio_device *vdev,
				   unsigned int offset, u64 val)
{
	__virtio64 v;

	might_sleep();
	v = cpu_to_virtio64(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}
640 
/*
 * Conditional config space accessor: evaluates to -ENOENT when @fbit
 * was not negotiated, otherwise performs virtio_cread() and evaluates
 * to 0.
 */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread((vdev), structname, member, ptr);	\
		_r;							\
	})
651 
/*
 * Conditional LE config space accessor: evaluates to -ENOENT when
 * @fbit was not negotiated, otherwise performs virtio_cread_le() and
 * evaluates to 0.
 */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread_le((vdev), structname, member, ptr); \
		_r;							\
	})
662 
663 #endif /* _LINUX_VIRTIO_CONFIG_H */
664