/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#ifndef _RTE_VHOST_H_
#define _RTE_VHOST_H_

/**
 * @file
 * Interface to vhost-user
 */

#include <stdbool.h>
#include <stdint.h>
#include <sys/eventfd.h>

#include <rte_memory.h>
#include <rte_mempool.h>

#ifdef __cplusplus
extern "C" {
#endif

/* These are not C++-aware. */
#include <linux/vhost.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_net.h>

#define RTE_VHOST_USER_CLIENT            (1ULL << 0)
#define RTE_VHOST_USER_NO_RECONNECT      (1ULL << 1)
#define RTE_VHOST_USER_RESERVED_1        (1ULL << 2)
#define RTE_VHOST_USER_IOMMU_SUPPORT     (1ULL << 3)
#define RTE_VHOST_USER_POSTCOPY_SUPPORT  (1ULL << 4)
/* support mbuf with external buffer attached */
#define RTE_VHOST_USER_EXTBUF_SUPPORT    (1ULL << 5)
/* support only linear buffers (no chained mbufs) */
#define RTE_VHOST_USER_LINEARBUF_SUPPORT (1ULL << 6)
#define RTE_VHOST_USER_ASYNC_COPY        (1ULL << 7)

/* Features. */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
#define VIRTIO_NET_F_MQ 22
#endif

#ifndef VIRTIO_NET_F_MTU
#define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
#define VIRTIO_F_ANY_LAYOUT 27
#endif

/** Protocol features. */
#ifndef VHOST_USER_PROTOCOL_F_MQ
#define VHOST_USER_PROTOCOL_F_MQ 0
#endif

#ifndef VHOST_USER_PROTOCOL_F_LOG_SHMFD
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#endif

#ifndef VHOST_USER_PROTOCOL_F_RARP
#define VHOST_USER_PROTOCOL_F_RARP 2
#endif

#ifndef VHOST_USER_PROTOCOL_F_REPLY_ACK
#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
#endif

#ifndef VHOST_USER_PROTOCOL_F_NET_MTU
#define VHOST_USER_PROTOCOL_F_NET_MTU 4
#endif

#ifndef VHOST_USER_PROTOCOL_F_SLAVE_REQ
#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5
#endif

#ifndef VHOST_USER_PROTOCOL_F_CRYPTO_SESSION
#define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
#endif

#ifndef VHOST_USER_PROTOCOL_F_PAGEFAULT
#define VHOST_USER_PROTOCOL_F_PAGEFAULT 8
#endif

#ifndef VHOST_USER_PROTOCOL_F_CONFIG
#define VHOST_USER_PROTOCOL_F_CONFIG 9
#endif

#ifndef VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD
#define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10
#endif

#ifndef VHOST_USER_PROTOCOL_F_HOST_NOTIFIER
#define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
#endif

#ifndef VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD
#define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12
#endif

#ifndef VHOST_USER_PROTOCOL_F_STATUS
#define VHOST_USER_PROTOCOL_F_STATUS 16
#endif

/** Indicate whether protocol features negotiation is supported. */
#ifndef VHOST_USER_F_PROTOCOL_FEATURES
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#endif

struct rte_vdpa_device;

/**
 * Information relating to memory regions, including offsets to
 * addresses in QEMU's memory file.
 */
struct rte_vhost_mem_region {
        uint64_t guest_phys_addr;
        uint64_t guest_user_addr;
        uint64_t host_user_addr;
        uint64_t size;
        void *mmap_addr;
        uint64_t mmap_size;
        int fd;
};

/**
 * Memory structure that includes region and mapping information.
 */
struct rte_vhost_memory {
        uint32_t nregions;
        struct rte_vhost_mem_region regions[];
};

struct rte_vhost_inflight_desc_split {
        uint8_t inflight;
        uint8_t padding[5];
        uint16_t next;
        uint64_t counter;
};

struct rte_vhost_inflight_info_split {
        uint64_t features;
        uint16_t version;
        uint16_t desc_num;
        uint16_t last_inflight_io;
        uint16_t used_idx;
        struct rte_vhost_inflight_desc_split desc[0];
};

struct rte_vhost_inflight_desc_packed {
        uint8_t inflight;
        uint8_t padding;
        uint16_t next;
        uint16_t last;
        uint16_t num;
        uint64_t counter;
        uint16_t id;
        uint16_t flags;
        uint32_t len;
        uint64_t addr;
};

struct rte_vhost_inflight_info_packed {
        uint64_t features;
        uint16_t version;
        uint16_t desc_num;
        uint16_t free_head;
        uint16_t old_free_head;
        uint16_t used_idx;
        uint16_t old_used_idx;
        uint8_t used_wrap_counter;
        uint8_t old_used_wrap_counter;
        uint8_t padding[7];
        struct rte_vhost_inflight_desc_packed desc[0];
};

struct rte_vhost_resubmit_desc {
        uint16_t index;
        uint64_t counter;
};

struct rte_vhost_resubmit_info {
        struct rte_vhost_resubmit_desc *resubmit_list;
        uint16_t resubmit_num;
};

struct rte_vhost_ring_inflight {
        union {
                struct rte_vhost_inflight_info_split *inflight_split;
                struct rte_vhost_inflight_info_packed *inflight_packed;
        };

        struct rte_vhost_resubmit_info *resubmit_inflight;
};

struct rte_vhost_vring {
        union {
                struct vring_desc *desc;
                struct vring_packed_desc *desc_packed;
        };
        union {
                struct vring_avail *avail;
                struct vring_packed_desc_event *driver_event;
        };
        union {
                struct vring_used *used;
                struct vring_packed_desc_event *device_event;
        };
        uint64_t log_guest_addr;

        /** Deprecated, use rte_vhost_vring_call() instead. */
        int callfd;

        int kickfd;
        uint16_t size;
};

/**
 * Possible results of the vhost-user message handling callbacks.
 */
enum rte_vhost_msg_result {
        /* Message handling failed */
        RTE_VHOST_MSG_RESULT_ERR = -1,
        /* Message handling successful */
        RTE_VHOST_MSG_RESULT_OK = 0,
        /* Message handling successful and reply prepared */
        RTE_VHOST_MSG_RESULT_REPLY = 1,
        /* Message not handled */
        RTE_VHOST_MSG_RESULT_NOT_HANDLED,
};

/**
 * Function prototype for the vhost backend to handle specific vhost-user
 * messages.
 *
 * @param vid
 *  vhost device id
 * @param msg
 *  Message pointer.
 * @return
 *  RTE_VHOST_MSG_RESULT_OK on success,
 *  RTE_VHOST_MSG_RESULT_REPLY on success with reply,
 *  RTE_VHOST_MSG_RESULT_ERR on failure,
 *  RTE_VHOST_MSG_RESULT_NOT_HANDLED if message was not handled.
 */
typedef enum rte_vhost_msg_result (*rte_vhost_msg_handle)(int vid, void *msg);

/**
 * Optional vhost-user message handlers.
 */
struct rte_vhost_user_extern_ops {
        /* Called prior to the master message handling. */
        rte_vhost_msg_handle pre_msg_handle;
        /* Called after the master message handling. */
        rte_vhost_msg_handle post_msg_handle;
};

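/*
 * Illustrative sketch (not part of the original header): a pass-through
 * pre-handler that defers every message to the built-in processing. The
 * handler name is hypothetical; the result values and the ops struct are
 * the ones defined above.
 *
 *   static enum rte_vhost_msg_result
 *   app_pre_msg_handle(int vid, void *msg)
 *   {
 *       return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
 *   }
 *
 *   static const struct rte_vhost_user_extern_ops extern_ops = {
 *       .pre_msg_handle = app_pre_msg_handle,
 *   };
 *
 * The struct is registered per device with
 * rte_vhost_extern_callback_register(), declared later in this file.
 */
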
/**
 * Device and vring operations.
 */
struct vhost_device_ops {
        int (*new_device)(int vid);             /**< Add device. */
        void (*destroy_device)(int vid);        /**< Remove device. */

        /** Triggered when a vring is enabled or disabled. */
        int (*vring_state_changed)(int vid, uint16_t queue_id, int enable);

        /**
         * Features could be changed after the feature negotiation.
         * For example, VHOST_F_LOG_ALL will be set/cleared at the
         * start/end of live migration, respectively. This callback
         * is used to inform the application of such a change.
         */
        int (*features_changed)(int vid, uint64_t features);

        int (*new_connection)(int vid);
        void (*destroy_connection)(int vid);

        /**
         * This callback is called each time the guest is notified about
         * waiting packets, i.e. on each eventfd_write(callfd). It can be
         * used to count these "slow" syscalls.
         */
        void (*guest_notified)(int vid);

        void *reserved[1]; /**< Reserved for future extension */
};

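/*
 * Illustrative sketch (not part of the original header): minimal device
 * callbacks wired into vhost_device_ops. The callback function names are
 * hypothetical.
 *
 *   static int app_new_device(int vid) { return 0; }
 *   static void app_destroy_device(int vid) { }
 *
 *   static const struct vhost_device_ops app_ops = {
 *       .new_device = app_new_device,
 *       .destroy_device = app_destroy_device,
 *   };
 *
 * The ops are attached to a socket path with
 * rte_vhost_driver_callback_register(), declared later in this file.
 */
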
/**
 * Convert a guest physical address to a host virtual address.
 *
 * This function is deprecated because it is unsafe.
 * Use rte_vhost_va_from_guest_pa() instead to ensure that the guest
 * physical range is fully and contiguously mapped into the process
 * virtual address space.
 *
 * @param mem
 *  the guest memory regions
 * @param gpa
 *  the guest physical address for querying
 * @return
 *  the host virtual address on success, 0 on failure
 */
__rte_deprecated
static __rte_always_inline uint64_t
rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
{
        struct rte_vhost_mem_region *reg;
        uint32_t i;

        for (i = 0; i < mem->nregions; i++) {
                reg = &mem->regions[i];
                if (gpa >= reg->guest_phys_addr &&
                    gpa < reg->guest_phys_addr + reg->size) {
                        return gpa - reg->guest_phys_addr +
                               reg->host_user_addr;
                }
        }

        return 0;
}

/**
 * Convert a guest physical address to a host virtual address safely.
 *
 * This variant of rte_vhost_gpa_to_vva() ensures that the entire
 * requested length is mapped and contiguous in the process address
 * space.
 *
 * @param mem
 *  the guest memory regions
 * @param gpa
 *  the guest physical address for querying
 * @param len
 *  the size of the requested area to map, updated with the actual size mapped
 * @return
 *  the host virtual address on success, 0 on failure
 */
__rte_experimental
static __rte_always_inline uint64_t
rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
        uint64_t gpa, uint64_t *len)
{
        struct rte_vhost_mem_region *r;
        uint32_t i;

        for (i = 0; i < mem->nregions; i++) {
                r = &mem->regions[i];
                if (gpa >= r->guest_phys_addr &&
                    gpa < r->guest_phys_addr + r->size) {

                        if (unlikely(*len > r->guest_phys_addr + r->size - gpa))
                                *len = r->guest_phys_addr + r->size - gpa;

                        return gpa - r->guest_phys_addr +
                               r->host_user_addr;
                }
        }
        *len = 0;

        return 0;
}

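/*
 * Illustrative sketch (not part of the original header): checking that the
 * whole range is mapped after a safe translation. The variable names are
 * hypothetical.
 *
 *   uint64_t len = desc_len;
 *   uint64_t vva = rte_vhost_va_from_guest_pa(mem, gpa, &len);
 *
 *   if (vva == 0 || len < desc_len) {
 *       // The range is absent or not contiguously mapped;
 *       // fall back to translating it piece by piece.
 *   }
 */
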
#define RTE_VHOST_NEED_LOG(features) ((features) & (1ULL << VHOST_F_LOG_ALL))

/**
 * Log a memory write starting at the given address.
 *
 * This function only needs to be invoked while live migration is in
 * progress, so most of the time it need not be called at all. To keep
 * the performance impact minimal, it is suggested to guard the call:
 *
 *   if (unlikely(RTE_VHOST_NEED_LOG(features)))
 *       rte_vhost_log_write(vid, addr, len);
 *
 * @param vid
 *  vhost device ID
 * @param addr
 *  the starting address for the write (in guest physical address space)
 * @param len
 *  the length to write
 */
void rte_vhost_log_write(int vid, uint64_t addr, uint64_t len);

/**
 * Log a used ring update starting at the given offset.
 *
 * As with rte_vhost_log_write(), it is suggested to guard the call:
 *
 *   if (unlikely(RTE_VHOST_NEED_LOG(features)))
 *       rte_vhost_log_used_vring(vid, vring_idx, offset, len);
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  the vring index
 * @param offset
 *  the offset inside the used ring
 * @param len
 *  the length to write
 */
void rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
        uint64_t offset, uint64_t len);

int rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable);

/**
 * Register a vhost driver. Different socket paths can be used to support
 * multiple vhost instances.
 */
int rte_vhost_driver_register(const char *path, uint64_t flags);

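/*
 * Illustrative sketch (not part of the original header): registering a
 * socket in client mode with reconnect disabled, using the
 * RTE_VHOST_USER_* flags defined above. The socket path is hypothetical.
 *
 *   uint64_t flags = RTE_VHOST_USER_CLIENT | RTE_VHOST_USER_NO_RECONNECT;
 *
 *   if (rte_vhost_driver_register("/tmp/vhost-user.sock", flags) != 0)
 *       rte_exit(EXIT_FAILURE, "vhost driver register failed\n");
 */
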
/* Unregister a vhost driver. This is only meaningful for vhost-user. */
int rte_vhost_driver_unregister(const char *path);

/**
 * Attach a vDPA device to the vhost-user socket; this enforces a single
 * connection per socket.
 *
 * @param path
 *  The vhost-user socket file path
 * @param dev
 *  vDPA device pointer
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_attach_vdpa_device(const char *path,
        struct rte_vdpa_device *dev);

/**
 * Detach the vDPA device from the vhost-user socket.
 *
 * @param path
 *  The vhost-user socket file path
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_detach_vdpa_device(const char *path);

/**
 * Get the vDPA device attached to the vhost-user socket.
 *
 * @param path
 *  The vhost-user socket file path
 * @return
 *  vDPA device pointer, NULL on failure
 */
struct rte_vdpa_device *
rte_vhost_driver_get_vdpa_device(const char *path);


/**
 * Set the feature bits the vhost-user driver supports.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  Supported features
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_set_features(const char *path, uint64_t features);

/**
 * Enable vhost-user driver features.
 *
 * Note that
 * - the param features should be a subset of the feature bits provided
 *   by rte_vhost_driver_set_features().
 * - it must be invoked before vhost-user negotiation starts.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  Features to enable
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_enable_features(const char *path, uint64_t features);

/**
 * Disable vhost-user driver features.
 *
 * The two notes at rte_vhost_driver_enable_features() also apply here.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  Features to disable
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_disable_features(const char *path, uint64_t features);

/**
 * Get the feature bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  A pointer to store the queried feature bits
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_get_features(const char *path, uint64_t *features);

/**
 * Set the protocol feature bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param protocol_features
 *  Supported protocol features
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_driver_set_protocol_features(const char *path,
        uint64_t protocol_features);

/**
 * Get the protocol feature bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param protocol_features
 *  A pointer to store the queried protocol feature bits
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_driver_get_protocol_features(const char *path,
        uint64_t *protocol_features);

/**
 * Get the queue number bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param queue_num
 *  A pointer to store the queried queue number bits
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num);

/**
 * Get the feature bits after negotiation.
 *
 * @param vid
 *  Vhost device ID
 * @param features
 *  A pointer to store the queried feature bits
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_negotiated_features(int vid, uint64_t *features);

/* Register callbacks. */
int rte_vhost_driver_callback_register(const char *path,
        struct vhost_device_ops const * const ops);

/**
 * Start the vhost-user driver.
 *
 * This function triggers the vhost-user negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_start(const char *path);

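/*
 * Illustrative sketch (not part of the original header): the usual
 * bring-up order for a vhost-user socket, with error handling elided.
 * "path", "features" and "app_ops" are hypothetical; note that features
 * must be set before negotiation starts, i.e. before the driver is
 * started.
 *
 *   rte_vhost_driver_register(path, 0);
 *   rte_vhost_driver_set_features(path, features);
 *   rte_vhost_driver_callback_register(path, &app_ops);
 *   rte_vhost_driver_start(path);
 */
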
/**
 * Get the MTU value of the device if set in QEMU.
 *
 * @param vid
 *  virtio-net device ID
 * @param mtu
 *  The variable to store the MTU value
 *
 * @return
 *  0: success
 *  -EAGAIN: device not yet started
 *  -ENOTSUP: device does not support the MTU feature
 */
int rte_vhost_get_mtu(int vid, uint16_t *mtu);

/**
 * Get the NUMA node from which the virtio-net device's memory
 * is allocated.
 *
 * @param vid
 *  vhost device ID
 *
 * @return
 *  The NUMA node, -1 on failure
 */
int rte_vhost_get_numa_node(int vid);

/**
 * @deprecated
 * Get the number of queues the device supports.
 *
 * Note this function is deprecated, as it returns a queue pair number,
 * which is vhost specific. Instead, rte_vhost_get_vring_num() should
 * be used.
 *
 * @param vid
 *  vhost device ID
 *
 * @return
 *  The number of queues, 0 on failure
 */
__rte_deprecated
uint32_t rte_vhost_get_queue_num(int vid);

/**
 * Get the number of vrings the device supports.
 *
 * @param vid
 *  vhost device ID
 *
 * @return
 *  The number of vrings, 0 on failure
 */
uint16_t rte_vhost_get_vring_num(int vid);

/**
 * Get the virtio-net device's ifname, which is the vhost-user socket
 * file path.
 *
 * @param vid
 *  vhost device ID
 * @param buf
 *  The buffer to store the queried ifname
 * @param len
 *  The length of buf
 *
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_ifname(int vid, char *buf, size_t len);

/**
 * Get how many avail entries are left in the queue.
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index
 *
 * @return
 *  num of avail entries left
 */
uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);

struct rte_mbuf;
struct rte_mempool;
/**
 * This function adds buffers to the virtio device's RX virtqueue. Buffers can
 * be received from the physical port or from another virtual device. A packet
 * count is returned to indicate the number of packets that were successfully
 * added to the RX queue.
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index in mq case
 * @param pkts
 *  array to contain packets to be enqueued
 * @param count
 *  packets num to be enqueued
 * @return
 *  num of packets enqueued
 */
uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
        struct rte_mbuf **pkts, uint16_t count);

/**
 * This function gets guest buffers from the virtio device TX virtqueue,
 * constructs host mbufs, copies the guest buffer content into them, and
 * stores them in pkts to be processed.
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index in mq case
 * @param mbuf_pool
 *  mbuf_pool where host mbufs are allocated.
 * @param pkts
 *  array to contain packets to be dequeued
 * @param count
 *  packets num to be dequeued
 * @return
 *  num of packets dequeued
 */
uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);

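/*
 * Illustrative sketch (not part of the original header): draining the
 * guest TX ring and echoing the packets back into the guest RX ring.
 * MAX_PKT_BURST, the queue indexes and mbuf_pool are hypothetical; any
 * packets the enqueue could not accept must be freed by the caller.
 *
 *   struct rte_mbuf *pkts[MAX_PKT_BURST];
 *   uint16_t nb_rx, nb_tx, i;
 *
 *   nb_rx = rte_vhost_dequeue_burst(vid, 1, mbuf_pool,
 *                                   pkts, MAX_PKT_BURST);
 *   nb_tx = rte_vhost_enqueue_burst(vid, 0, pkts, nb_rx);
 *
 *   for (i = nb_tx; i < nb_rx; i++)
 *       rte_pktmbuf_free(pkts[i]);
 */
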
/**
 * Get the guest memory table: a list of memory regions.
 *
 * An rte_vhost_memory object will be allocated internally to hold the
 * guest memory regions. The application should free it at the
 * destroy_device() callback.
 *
 * @param vid
 *  vhost device ID
 * @param mem
 *  To store the returned mem regions
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);

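/*
 * Illustrative sketch (not part of the original header): querying the
 * memory table and releasing it once done. free() is used on the
 * assumption that the table is heap-allocated, per the note above that
 * the application is responsible for freeing it; inspect_region() is a
 * hypothetical helper.
 *
 *   struct rte_vhost_memory *mem;
 *   uint32_t i;
 *
 *   if (rte_vhost_get_mem_table(vid, &mem) == 0) {
 *       for (i = 0; i < mem->nregions; i++)
 *           inspect_region(&mem->regions[i]);
 *       free(mem);
 *   }
 */
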
/**
 * Get guest vring info, including the vring address, vring size, etc.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param vring
 *  the structure to hold the requested vring info
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
        struct rte_vhost_vring *vring);

/**
 * Get guest inflight vring info, including inflight ring and resubmit list.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param vring
 *  the structure to hold the requested inflight vring info
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_get_vhost_ring_inflight(int vid, uint16_t vring_idx,
        struct rte_vhost_ring_inflight *vring);

/**
 * Set split inflight descriptor.
 *
 * This function saves descriptors that have been consumed from the
 * available ring.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param idx
 *  inflight entry index
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
        uint16_t idx);

/**
 * Set packed inflight descriptor and get the corresponding inflight entry.
 *
 * This function saves descriptors that have been consumed.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param head
 *  head of descriptors
 * @param last
 *  last of descriptors
 * @param inflight_entry
 *  corresponding inflight entry
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
        uint16_t head, uint16_t last, uint16_t *inflight_entry);

/**
 * Save the head of the list of the last batch of used descriptors.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param idx
 *  descriptor entry index
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_set_last_inflight_io_split(int vid,
        uint16_t vring_idx, uint16_t idx);

/**
 * Update the inflight free_head, used_idx and used_wrap_counter.
 *
 * This function updates the inflight status before the descriptors
 * are marked as used.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param head
 *  head of descriptors
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_set_last_inflight_io_packed(int vid,
        uint16_t vring_idx, uint16_t head);

/**
 * Clear the split inflight status.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param last_used_idx
 *  last used idx of the used ring
 * @param idx
 *  inflight entry index
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
        uint16_t last_used_idx, uint16_t idx);

/**
 * Clear the packed inflight status.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param head
 *  inflight entry index
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
        uint16_t head);

/**
 * Notify the guest that used descriptors have been added to the vring. This
 * function acts as a memory barrier.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_vring_call(int vid, uint16_t vring_idx);

/**
 * Get vhost RX queue avail count.
 *
 * @param vid
 *  vhost device ID
 * @param qid
 *  virtio queue index in mq case
 * @return
 *  num of desc available
 */
uint32_t rte_vhost_rx_queue_count(int vid, uint16_t qid);

/**
 * Get the log base and log size of the vhost device.
 *
 * @param vid
 *  vhost device ID
 * @param log_base
 *  vhost log base
 * @param log_size
 *  vhost log size
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_log_base(int vid, uint64_t *log_base, uint64_t *log_size);

/**
 * Get last_avail/used_idx of the vhost virtqueue.
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue index
 * @param last_avail_idx
 *  vhost last_avail_idx to get
 * @param last_used_idx
 *  vhost last_used_idx to get
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_vring_base(int vid, uint16_t queue_id,
        uint16_t *last_avail_idx, uint16_t *last_used_idx);

/**
 * Get last_avail/last_used of the vhost virtqueue.
 *
 * This function is designed for reconnection and is specific to the
 * packed ring, as the two values can be retrieved from the inflight
 * queue region.
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue index
 * @param last_avail_idx
 *  vhost last_avail_idx to get
 * @param last_used_idx
 *  vhost last_used_idx to get
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_get_vring_base_from_inflight(int vid,
        uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx);

/**
 * Set last_avail/used_idx of the vhost virtqueue.
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue index
 * @param last_avail_idx
 *  last_avail_idx to set
 * @param last_used_idx
 *  last_used_idx to set
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_vring_base(int vid, uint16_t queue_id,
        uint16_t last_avail_idx, uint16_t last_used_idx);

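/*
 * Illustrative sketch (not part of the original header): saving the ring
 * indexes on teardown and restoring them after a restart. The persistence
 * step is application-specific and only hinted at here.
 *
 *   uint16_t last_avail, last_used;
 *
 *   rte_vhost_get_vring_base(vid, queue_id, &last_avail, &last_used);
 *   // ... persist last_avail/last_used across the restart ...
 *   rte_vhost_set_vring_base(vid, queue_id, last_avail, last_used);
 */
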
/**
 * Register external message handling callbacks.
 *
 * @param vid
 *  vhost device ID
 * @param ops
 *  virtio external callbacks to register
 * @param ctx
 *  additional context passed to the callbacks
 * @return
 *  0 on success, -1 on failure
 */
__rte_experimental
int
rte_vhost_extern_callback_register(int vid,
        struct rte_vhost_user_extern_ops const * const ops, void *ctx);

/**
 * Get the vDPA device for a vhost device.
 *
 * @param vid
 *  vhost device id
 * @return
 *  vDPA device pointer on success, NULL on failure
 */
struct rte_vdpa_device *
rte_vhost_get_vdpa_device(int vid);

/**
 * Notify the guest that it should get the virtio configuration space
 * from the backend.
 *
 * @param vid
 *  vhost device ID
 * @param need_reply
 *  whether to wait for the master to acknowledge the status of this operation
 * @return
 *  0 on success, < 0 on failure
 */
__rte_experimental
int
rte_vhost_slave_config_change(int vid, bool need_reply);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_VHOST_H_ */