/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#ifndef _RTE_VHOST_H_
#define _RTE_VHOST_H_

/**
 * @file
 * Interface to vhost-user
 */

#include <stdbool.h>
#include <stdint.h>
#include <sys/eventfd.h>

#include <rte_memory.h>
#include <rte_mempool.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef __cplusplus
/* These are not C++-aware. */
#include <linux/vhost.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_net.h>
#endif

#define RTE_VHOST_USER_CLIENT (1ULL << 0)
#define RTE_VHOST_USER_NO_RECONNECT (1ULL << 1)
#define RTE_VHOST_USER_RESERVED_1 (1ULL << 2)
#define RTE_VHOST_USER_IOMMU_SUPPORT (1ULL << 3)
#define RTE_VHOST_USER_POSTCOPY_SUPPORT (1ULL << 4)
/* support mbuf with external buffer attached */
#define RTE_VHOST_USER_EXTBUF_SUPPORT (1ULL << 5)
/* support only linear buffers (no chained mbufs) */
#define RTE_VHOST_USER_LINEARBUF_SUPPORT (1ULL << 6)
#define RTE_VHOST_USER_ASYNC_COPY (1ULL << 7)
#define RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS (1ULL << 8)
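
/*
 * Usage sketch (illustrative, not part of the API): the flags above are
 * OR-ed together and passed to rte_vhost_driver_register(); the socket
 * path below is hypothetical.
 *
 *	if (rte_vhost_driver_register("/tmp/vhost.sock",
 *			RTE_VHOST_USER_CLIENT | RTE_VHOST_USER_NO_RECONNECT) < 0)
 *		return -1;
 */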

/* Features. */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
#define VIRTIO_NET_F_MQ 22
#endif

#ifndef VIRTIO_NET_F_MTU
#define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
#define VIRTIO_F_ANY_LAYOUT 27
#endif

/** Protocol features. */
#ifndef VHOST_USER_PROTOCOL_F_MQ
#define VHOST_USER_PROTOCOL_F_MQ 0
#endif

#ifndef VHOST_USER_PROTOCOL_F_LOG_SHMFD
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#endif

#ifndef VHOST_USER_PROTOCOL_F_RARP
#define VHOST_USER_PROTOCOL_F_RARP 2
#endif

#ifndef VHOST_USER_PROTOCOL_F_REPLY_ACK
#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
#endif

#ifndef VHOST_USER_PROTOCOL_F_NET_MTU
#define VHOST_USER_PROTOCOL_F_NET_MTU 4
#endif

#ifndef VHOST_USER_PROTOCOL_F_SLAVE_REQ
#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5
#endif

#ifndef VHOST_USER_PROTOCOL_F_CRYPTO_SESSION
#define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
#endif

#ifndef VHOST_USER_PROTOCOL_F_PAGEFAULT
#define VHOST_USER_PROTOCOL_F_PAGEFAULT 8
#endif

#ifndef VHOST_USER_PROTOCOL_F_CONFIG
#define VHOST_USER_PROTOCOL_F_CONFIG 9
#endif

#ifndef VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD
#define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10
#endif

#ifndef VHOST_USER_PROTOCOL_F_HOST_NOTIFIER
#define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
#endif

#ifndef VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD
#define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12
#endif

#ifndef VHOST_USER_PROTOCOL_F_STATUS
#define VHOST_USER_PROTOCOL_F_STATUS 16
#endif

/** Indicate whether protocol features negotiation is supported. */
#ifndef VHOST_USER_F_PROTOCOL_FEATURES
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#endif

#define RTE_MAX_VHOST_DEVICE 1024

struct rte_vdpa_device;
/**
 * Information relating to memory regions including offsets to
 * addresses in QEMU's memory file.
 */
struct rte_vhost_mem_region {
	uint64_t guest_phys_addr;
	uint64_t guest_user_addr;
	uint64_t host_user_addr;
	uint64_t size;
	void *mmap_addr;
	uint64_t mmap_size;
	int fd;
};

/**
 * Memory structure includes region and mapping information.
 */
struct rte_vhost_memory {
	uint32_t nregions;
	struct rte_vhost_mem_region regions[];
};

struct rte_vhost_inflight_desc_split {
	uint8_t inflight;
	uint8_t padding[5];
	uint16_t next;
	uint64_t counter;
};

struct rte_vhost_inflight_info_split {
	uint64_t features;
	uint16_t version;
	uint16_t desc_num;
	uint16_t last_inflight_io;
	uint16_t used_idx;
	struct rte_vhost_inflight_desc_split desc[0];
};

struct rte_vhost_inflight_desc_packed {
	uint8_t inflight;
	uint8_t padding;
	uint16_t next;
	uint16_t last;
	uint16_t num;
	uint64_t counter;
	uint16_t id;
	uint16_t flags;
	uint32_t len;
	uint64_t addr;
};

struct rte_vhost_inflight_info_packed {
	uint64_t features;
	uint16_t version;
	uint16_t desc_num;
	uint16_t free_head;
	uint16_t old_free_head;
	uint16_t used_idx;
	uint16_t old_used_idx;
	uint8_t used_wrap_counter;
	uint8_t old_used_wrap_counter;
	uint8_t padding[7];
	struct rte_vhost_inflight_desc_packed desc[0];
};

struct rte_vhost_resubmit_desc {
	uint16_t index;
	uint64_t counter;
};

struct rte_vhost_resubmit_info {
	struct rte_vhost_resubmit_desc *resubmit_list;
	uint16_t resubmit_num;
};

struct rte_vhost_ring_inflight {
	union {
		struct rte_vhost_inflight_info_split *inflight_split;
		struct rte_vhost_inflight_info_packed *inflight_packed;
	};

	struct rte_vhost_resubmit_info *resubmit_inflight;
};

struct rte_vhost_vring {
	union {
		struct vring_desc *desc;
		struct vring_packed_desc *desc_packed;
	};
	union {
		struct vring_avail *avail;
		struct vring_packed_desc_event *driver_event;
	};
	union {
		struct vring_used *used;
		struct vring_packed_desc_event *device_event;
	};
	uint64_t log_guest_addr;

	/** Deprecated, use rte_vhost_vring_call() instead. */
	int callfd;

	int kickfd;
	uint16_t size;
};

/**
 * Possible results of the vhost user message handling callbacks
 */
enum rte_vhost_msg_result {
	/* Message handling failed */
	RTE_VHOST_MSG_RESULT_ERR = -1,
	/* Message handling successful */
	RTE_VHOST_MSG_RESULT_OK = 0,
	/* Message handling successful and reply prepared */
	RTE_VHOST_MSG_RESULT_REPLY = 1,
	/* Message not handled */
	RTE_VHOST_MSG_RESULT_NOT_HANDLED,
};

/**
 * Function prototype for the vhost backend to handle specific vhost user
 * messages.
 *
 * @param vid
 *  vhost device id
 * @param msg
 *  Message pointer.
 * @return
 *  RTE_VHOST_MSG_RESULT_OK on success,
 *  RTE_VHOST_MSG_RESULT_REPLY on success with reply,
 *  RTE_VHOST_MSG_RESULT_ERR on failure,
 *  RTE_VHOST_MSG_RESULT_NOT_HANDLED if message was not handled.
 */
typedef enum rte_vhost_msg_result (*rte_vhost_msg_handle)(int vid, void *msg);

/**
 * Optional vhost user message handlers.
 */
struct rte_vhost_user_extern_ops {
	/* Called prior to the master message handling. */
	rte_vhost_msg_handle pre_msg_handle;
	/* Called after the master message handling. */
	rte_vhost_msg_handle post_msg_handle;
};
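
/*
 * Usage sketch (illustrative): an external handler that lets every
 * message fall through to the default handling. A real handler would
 * inspect the message payload before deciding what to return.
 *
 *	static enum rte_vhost_msg_result
 *	app_pre_handler(int vid, void *msg)
 *	{
 *		RTE_SET_USED(vid);
 *		RTE_SET_USED(msg);
 *		return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
 *	}
 *
 *	static const struct rte_vhost_user_extern_ops extern_ops = {
 *		.pre_msg_handle = app_pre_handler,
 *	};
 *
 * The ops are registered per device with
 * rte_vhost_extern_callback_register(), declared below.
 */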

/**
 * Device and vring operations.
 */
struct rte_vhost_device_ops {
	int (*new_device)(int vid);		/**< Add device. */
	void (*destroy_device)(int vid);	/**< Remove device. */

	/** Triggered when a vring is enabled or disabled. */
	int (*vring_state_changed)(int vid, uint16_t queue_id, int enable);

	/**
	 * Features could be changed after the feature negotiation.
	 * For example, VHOST_F_LOG_ALL will be set/cleared at the
	 * start/end of live migration, respectively. This callback
	 * is used to inform the application on such change.
	 */
	int (*features_changed)(int vid, uint64_t features);

	int (*new_connection)(int vid);
	void (*destroy_connection)(int vid);

	/**
	 * This callback gets called each time a guest gets notified
	 * about waiting packets. This is the interrupt handling through
	 * the eventfd_write(callfd), which can be used for counting these
	 * "slow" syscalls.
	 */
	void (*guest_notified)(int vid);

	void *reserved[1]; /**< Reserved for future extension */
};
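
/*
 * Usage sketch (illustrative): a minimal callback set. new_device()
 * typically looks up application state keyed by vid and starts the
 * datapath; destroy_device() stops it. The app_* helpers are
 * hypothetical.
 *
 *	static int
 *	app_new_device(int vid)
 *	{
 *		app_start_datapath(vid);
 *		return 0;
 *	}
 *
 *	static void
 *	app_destroy_device(int vid)
 *	{
 *		app_stop_datapath(vid);
 *	}
 *
 *	static const struct rte_vhost_device_ops app_ops = {
 *		.new_device = app_new_device,
 *		.destroy_device = app_destroy_device,
 *	};
 */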

/**
 * Power monitor condition.
 */
struct rte_vhost_power_monitor_cond {
	/** Address to monitor for changes */
	volatile void *addr;
	/** If the `mask` is non-zero, the location pointed
	 * to by `addr` will be read and masked, then
	 * compared with this value.
	 */
	uint64_t val;
	/** 64-bit mask to extract the value read from `addr` */
	uint64_t mask;
	/** Data size (in bytes) that will be read from the
	 * monitored memory location (`addr`).
	 */
	uint8_t size;
	/** If 1, the driver should skip core sleep when the masked
	 * value read from `addr` equals `val`. If 0, it should skip
	 * core sleep when the masked value read from `addr` does not
	 * equal `val`.
	 */
	uint8_t match;
};

/**
 * Convert guest physical address to host virtual address
 *
 * This function is deprecated because it is unsafe.
 * New rte_vhost_va_from_guest_pa() should be used instead to ensure
 * guest physical ranges are fully and contiguously mapped into
 * the process virtual address space.
 *
 * @param mem
 *  the guest memory regions
 * @param gpa
 *  the guest physical address for querying
 * @return
 *  the host virtual address on success, 0 on failure
 */
__rte_deprecated
static __rte_always_inline uint64_t
rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (gpa >= reg->guest_phys_addr &&
		    gpa < reg->guest_phys_addr + reg->size) {
			return gpa - reg->guest_phys_addr +
			       reg->host_user_addr;
		}
	}

	return 0;
}

/**
 * Convert guest physical address to host virtual address safely
 *
 * This variant of rte_vhost_gpa_to_vva() ensures all of the
 * requested length is mapped and contiguous in the process address
 * space.
 *
 * @param mem
 *  the guest memory regions
 * @param gpa
 *  the guest physical address for querying
 * @param len
 *  the size of the requested area to map, updated with actual size mapped
 * @return
 *  the host virtual address on success, 0 on failure
 */
static __rte_always_inline uint64_t
rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
	uint64_t gpa, uint64_t *len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	for (i = 0; i < mem->nregions; i++) {
		r = &mem->regions[i];
		if (gpa >= r->guest_phys_addr &&
		    gpa < r->guest_phys_addr + r->size) {

			if (unlikely(*len > r->guest_phys_addr + r->size - gpa))
				*len = r->guest_phys_addr + r->size - gpa;

			return gpa - r->guest_phys_addr +
			       r->host_user_addr;
		}
	}
	*len = 0;

	return 0;
}
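
/*
 * Usage sketch (illustrative): translate a guest physical address and
 * verify that the whole object is contiguously mapped before touching
 * it; a short mapping is reported through the updated *len.
 *
 *	uint64_t len = sizeof(struct virtio_net_hdr);
 *	uint64_t vva = rte_vhost_va_from_guest_pa(mem, gpa, &len);
 *
 *	if (vva == 0 || len < sizeof(struct virtio_net_hdr))
 *		return -1;
 */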

#define RTE_VHOST_NEED_LOG(features)	((features) & (1ULL << VHOST_F_LOG_ALL))

/**
 * Log a guest memory write starting at the given address.
 *
 * This function only needs to be invoked while live migration is in
 * progress; most of the time it need not be called at all. To keep the
 * performance impact minimal, it is suggested to do a check before
 * calling it:
 *
 *        if (unlikely(RTE_VHOST_NEED_LOG(features)))
 *                rte_vhost_log_write(vid, addr, len);
 *
 * @param vid
 *  vhost device ID
 * @param addr
 *  the starting address for write (in guest physical address space)
 * @param len
 *  the length to write
 */
void rte_vhost_log_write(int vid, uint64_t addr, uint64_t len);

/**
 * Log a used ring update starting at the given offset.
 *
 * As with rte_vhost_log_write(), it is suggested to do a check before
 * calling it:
 *
 *        if (unlikely(RTE_VHOST_NEED_LOG(features)))
 *                rte_vhost_log_used_vring(vid, vring_idx, offset, len);
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  the vring index
 * @param offset
 *  the offset inside the used ring
 * @param len
 *  the length to write
 */
void rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
	uint64_t offset, uint64_t len);

int rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable);

/**
 * Register a vhost driver. The path can differ per registration to
 * support multiple vhost instances.
 */
int rte_vhost_driver_register(const char *path, uint64_t flags);

/* Unregister a vhost driver. This is only meaningful for vhost-user. */
int rte_vhost_driver_unregister(const char *path);

/**
 * Attach a vDPA device to a vhost-user socket; this enforces a single
 * connection per socket.
 *
 * @param path
 *  The vhost-user socket file path
 * @param dev
 *  vDPA device pointer
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_attach_vdpa_device(const char *path,
	struct rte_vdpa_device *dev);

/**
 * Detach the vDPA device from a vhost-user socket.
 *
 * @param path
 *  The vhost-user socket file path
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_detach_vdpa_device(const char *path);

/**
 * Get the vDPA device attached to a vhost-user socket.
 *
 * @param path
 *  The vhost-user socket file path
 * @return
 *  vDPA device pointer, NULL on failure
 */
struct rte_vdpa_device *
rte_vhost_driver_get_vdpa_device(const char *path);

/**
 * Set the feature bits the vhost-user driver supports.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  Supported features
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_set_features(const char *path, uint64_t features);

/**
 * Enable vhost-user driver features.
 *
 * Note that
 * - the features parameter should be a subset of the feature bits provided
 *   by rte_vhost_driver_set_features().
 * - it must be invoked before vhost-user negotiation starts.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  Features to enable
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_enable_features(const char *path, uint64_t features);

/**
 * Disable vhost-user driver features.
 *
 * The two notes at rte_vhost_driver_enable_features() also apply here.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  Features to disable
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_disable_features(const char *path, uint64_t features);
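
/*
 * Usage sketch (illustrative): trim the advertised feature set before
 * negotiation starts; the socket path is hypothetical.
 *
 *	rte_vhost_driver_disable_features("/tmp/vhost.sock",
 *			1ULL << VIRTIO_NET_F_MQ);
 */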

/**
 * Get the feature bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  A pointer to store the queried feature bits
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_get_features(const char *path, uint64_t *features);

/**
 * Set the protocol feature bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param protocol_features
 *  Supported protocol features
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_set_protocol_features(const char *path,
	uint64_t protocol_features);

/**
 * Get the protocol feature bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param protocol_features
 *  A pointer to store the queried protocol feature bits
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_get_protocol_features(const char *path,
	uint64_t *protocol_features);

/**
 * Get the maximum number of queues supported, before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param queue_num
 *  A pointer to store the queried queue number
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num);

/**
 * Get the feature bits after negotiation
 *
 * @param vid
 *  Vhost device ID
 * @param features
 *  A pointer to store the queried feature bits
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_negotiated_features(int vid, uint64_t *features);

/**
 * Get the protocol feature bits after negotiation
 *
 * @param vid
 *  Vhost device ID
 * @param protocol_features
 *  A pointer to store the queried protocol feature bits
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_get_negotiated_protocol_features(int vid,
	uint64_t *protocol_features);

/* Register callbacks. */
int rte_vhost_driver_callback_register(const char *path,
	struct rte_vhost_device_ops const * const ops);

/**
 * Start the vhost-user driver.
 *
 * This function triggers the vhost-user negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_start(const char *path);
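
/*
 * Usage sketch (illustrative): the usual bring-up sequence for one
 * socket. Error handling is omitted; the path is hypothetical and
 * app_ops is an application-defined rte_vhost_device_ops instance.
 *
 *	const char *path = "/tmp/vhost.sock";
 *
 *	rte_vhost_driver_register(path, 0);
 *	rte_vhost_driver_callback_register(path, &app_ops);
 *	rte_vhost_driver_start(path);
 */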

/**
 * Get the MTU value of the device if set in QEMU.
 *
 * @param vid
 *  virtio-net device ID
 * @param mtu
 *  The variable to store the MTU value
 *
 * @return
 *  0: success
 *  -EAGAIN: device not yet started
 *  -ENOTSUP: device does not support MTU feature
 */
int rte_vhost_get_mtu(int vid, uint16_t *mtu);

/**
 * Get the NUMA node from which the virtio net device's memory
 * is allocated.
 *
 * @param vid
 *  vhost device ID
 *
 * @return
 *  The NUMA node, -1 on failure
 */
int rte_vhost_get_numa_node(int vid);

/**
 * @deprecated
 * Get the number of queues the device supports.
 *
 * Note this function is deprecated, as it returns a queue pair number,
 * which is vhost specific. Instead, rte_vhost_get_vring_num should
 * be used.
 *
 * @param vid
 *  vhost device ID
 *
 * @return
 *  The number of queues, 0 on failure
 */
__rte_deprecated
uint32_t rte_vhost_get_queue_num(int vid);

/**
 * Get the number of vrings the device supports.
 *
 * @param vid
 *  vhost device ID
 *
 * @return
 *  The number of vrings, 0 on failure
 */
uint16_t rte_vhost_get_vring_num(int vid);

/**
 * Get the virtio net device's ifname, which is the vhost-user socket
 * file path.
 *
 * @param vid
 *  vhost device ID
 * @param buf
 *  The buffer to store the queried ifname
 * @param len
 *  The length of buf
 *
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_ifname(int vid, char *buf, size_t len);

/**
 * Get how many avail entries are left in the queue
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index
 *
 * @return
 *  num of avail entries left
 */
uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);

struct rte_mbuf;
struct rte_mempool;
/**
 * This function adds buffers to the virtio device's RX virtqueue. Buffers can
 * be received from the physical port or from another virtual device. A packet
 * count is returned to indicate the number of packets that were successfully
 * added to the RX queue.
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index in mq case
 * @param pkts
 *  array to contain packets to be enqueued
 * @param count
 *  number of packets to be enqueued
 * @return
 *  num of packets enqueued
 */
uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
	struct rte_mbuf **pkts, uint16_t count);

/**
 * This function gets guest buffers from the virtio device TX virtqueue,
 * constructs host mbufs, copies guest buffer content to the host mbufs,
 * and stores them in pkts to be processed.
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index in mq case
 * @param mbuf_pool
 *  mbuf_pool where host mbuf is allocated.
 * @param pkts
 *  array to contain packets to be dequeued
 * @param count
 *  number of packets to be dequeued
 * @return
 *  num of packets dequeued
 */
uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
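
/*
 * Usage sketch (illustrative): drain the guest TX ring and drop the
 * packets. By virtio-net convention even queue indexes are receive
 * rings and odd ones transmit rings, so index 1 is the first TX ring
 * here.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb, i;
 *
 *	nb = rte_vhost_dequeue_burst(vid, 1, mbuf_pool, pkts, 32);
 *	for (i = 0; i < nb; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */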

/**
 * Get guest mem table: a list of memory regions.
 *
 * An rte_vhost_memory object will be allocated internally, to hold the
 * guest memory regions. The application should free it at the
 * destroy_device() callback.
 *
 * @param vid
 *  vhost device ID
 * @param mem
 *  To store the returned mem regions
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
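
/*
 * Usage sketch (illustrative): dump the guest memory layout. The table
 * is heap-allocated by the library, so plain free() releases it.
 *
 *	struct rte_vhost_memory *mem;
 *	uint32_t i;
 *
 *	if (rte_vhost_get_mem_table(vid, &mem) == 0) {
 *		for (i = 0; i < mem->nregions; i++)
 *			printf("region %u: gpa 0x%" PRIx64 " size 0x%" PRIx64 "\n",
 *					i, mem->regions[i].guest_phys_addr,
 *					mem->regions[i].size);
 *		free(mem);
 *	}
 */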

/**
 * Get guest vring info, including the vring address, vring size, etc.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param vring
 *  the structure to hold the requested vring info
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
	struct rte_vhost_vring *vring);
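
/*
 * Usage sketch (illustrative): inspect the geometry of vring 0.
 *
 *	struct rte_vhost_vring vring;
 *
 *	if (rte_vhost_get_vhost_vring(vid, 0, &vring) == 0)
 *		printf("vring 0: size %u, kickfd %d\n", vring.size, vring.kickfd);
 */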

/**
 * Get guest inflight vring info, including inflight ring and resubmit list.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param vring
 *  the structure to hold the requested inflight vring info
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_vhost_ring_inflight(int vid, uint16_t vring_idx,
	struct rte_vhost_ring_inflight *vring);

/**
 * Set split inflight descriptor.
 *
 * This function saves descriptors that have been consumed from the
 * available ring.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param idx
 *  inflight entry index
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
	uint16_t idx);

/**
 * Set packed inflight descriptor and get corresponding inflight entry
 *
 * This function saves descriptors that have been consumed.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param head
 *  head of descriptors
 * @param last
 *  last of descriptors
 * @param inflight_entry
 *  corresponding inflight entry
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
	uint16_t head, uint16_t last, uint16_t *inflight_entry);

/**
 * Save the head of the list of the last batch of used descriptors.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param idx
 *  descriptor entry index
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_last_inflight_io_split(int vid,
	uint16_t vring_idx, uint16_t idx);

/**
 * Update the inflight free_head, used_idx and used_wrap_counter.
 *
 * This function updates the inflight status before marking the
 * descriptors as used.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param head
 *  head of descriptors
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_last_inflight_io_packed(int vid,
	uint16_t vring_idx, uint16_t head);

/**
 * Clear the split inflight status.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param last_used_idx
 *  last used idx of used ring
 * @param idx
 *  inflight entry index
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
	uint16_t last_used_idx, uint16_t idx);

/**
 * Clear the packed inflight status.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param head
 *  inflight entry index
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
	uint16_t head);
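
/*
 * Usage sketch (illustrative): the per-descriptor lifecycle for a split
 * ring when inflight tracking is enabled. Processing and the used ring
 * update themselves are application-specific and elided here.
 *
 *	rte_vhost_set_inflight_desc_split(vid, vring_idx, desc_idx);
 *	... process the descriptor and fill its used ring entry ...
 *	rte_vhost_set_last_inflight_io_split(vid, vring_idx, desc_idx);
 *	... publish the used ring index update ...
 *	rte_vhost_clr_inflight_desc_split(vid, vring_idx, last_used_idx,
 *			desc_idx);
 */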

/**
 * Notify the guest that used descriptors have been added to the vring. This
 * function acts as a memory barrier.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_vring_call(int vid, uint16_t vring_idx);

/**
 * Get vhost RX queue avail count.
 *
 * @param vid
 *  vhost device ID
 * @param qid
 *  virtio queue index in mq case
 * @return
 *  num of desc available
 */
uint32_t rte_vhost_rx_queue_count(int vid, uint16_t qid);

/**
 * Get power monitor address of the vhost device
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue ID
 * @param pmc
 *  power monitor condition
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_get_monitor_addr(int vid, uint16_t queue_id,
	struct rte_vhost_power_monitor_cond *pmc);

/**
 * Get log base and log size of the vhost device
 *
 * @param vid
 *  vhost device ID
 * @param log_base
 *  vhost log base
 * @param log_size
 *  vhost log size
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_log_base(int vid, uint64_t *log_base, uint64_t *log_size);

/**
 * Get last_avail/used_idx of the vhost virtqueue
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue index
 * @param last_avail_idx
 *  vhost last_avail_idx to get
 * @param last_used_idx
 *  vhost last_used_idx to get
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_vring_base(int vid, uint16_t queue_id,
	uint16_t *last_avail_idx, uint16_t *last_used_idx);

/**
 * Get last_avail/last_used of the vhost virtqueue
 *
 * This function is designed for reconnection. It is specific to the
 * packed ring, as the two values can be retrieved from the inflight
 * queue region.
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue index
 * @param last_avail_idx
 *  vhost last_avail_idx to get
 * @param last_used_idx
 *  vhost last_used_idx to get
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_vring_base_from_inflight(int vid,
	uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx);

/**
 * Set last_avail/used_idx of the vhost virtqueue
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue index
 * @param last_avail_idx
 *  last_avail_idx to set
 * @param last_used_idx
 *  last_used_idx to set
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_vring_base(int vid, uint16_t queue_id,
	uint16_t last_avail_idx, uint16_t last_used_idx);

/**
 * Register external message handling callbacks
 *
 * @param vid
 *  vhost device ID
 * @param ops
 *  virtio external callbacks to register
 * @param ctx
 *  additional context passed to the callbacks
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_extern_callback_register(int vid,
	struct rte_vhost_user_extern_ops const * const ops, void *ctx);

/**
 * Get the vDPA device for a vhost device.
 *
 * @param vid
 *  vhost device id
 * @return
 *  vDPA device pointer on success, NULL on failure
 */
struct rte_vdpa_device *
rte_vhost_get_vdpa_device(int vid);

/**
 * Notify the guest that it should get the virtio configuration space
 * from the backend.
 *
 * @param vid
 *  vhost device ID
 * @param need_reply
 *  wait for the master to respond with the status of this operation
 * @return
 *  0 on success, < 0 on failure
 */
__rte_experimental
int
rte_vhost_slave_config_change(int vid, bool need_reply);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_VHOST_H_ */