/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation
 */
42bfe3f2eSlogwang
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#include <rte_memory.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "vhost_kernel_tap.h"
152bfe3f2eSlogwang
162bfe3f2eSlogwang struct vhost_memory_kernel {
172bfe3f2eSlogwang uint32_t nregions;
182bfe3f2eSlogwang uint32_t padding;
192bfe3f2eSlogwang struct vhost_memory_region regions[0];
202bfe3f2eSlogwang };
212bfe3f2eSlogwang
/* vhost kernel ioctls, mirroring the definitions in linux/vhost.h.
 * NOTE(review): these are duplicated here rather than taken from the
 * kernel header — presumably to avoid a build dependency; verify they
 * stay in sync with the running kernel's ABI.
 */
#define VHOST_VIRTIO 0xAF
#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, struct vhost_memory_kernel)
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
392bfe3f2eSlogwang
/* Default limit on memory regions accepted by the vhost kernel module. */
static uint64_t max_regions = 64;

/*
 * Read the vhost module parameter "max_mem_regions" from sysfs and
 * update max_regions accordingly.
 *
 * Keeps the built-in default (64) when the file is absent, unreadable,
 * or does not parse to a positive number. The original code assigned
 * strtoull()'s result unconditionally, so malformed content could set
 * max_regions to 0 and make prepare_vhost_memory_kernel() always fail.
 */
static void
get_vhost_kernel_max_regions(void)
{
	int fd;
	ssize_t len;
	uint64_t val;
	char buf[20] = {'\0'};

	fd = open("/sys/module/vhost/parameters/max_mem_regions", O_RDONLY);
	if (fd < 0)
		return;

	len = read(fd, buf, sizeof(buf) - 1);
	if (len > 0) {
		buf[len] = '\0';
		val = strtoull(buf, NULL, 10);
		/* Only accept a sane, positive limit. */
		if (val > 0)
			max_regions = val;
	}

	close(fd);
}
572bfe3f2eSlogwang
/* Map vhost-user request codes (array index) to the equivalent vhost
 * kernel ioctl request number. Requests not listed here map to 0 and
 * must not be passed to vhost_kernel_ioctl().
 */
static uint64_t vhost_req_user_to_kernel[] = {
	[VHOST_USER_SET_OWNER] = VHOST_SET_OWNER,
	[VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
	[VHOST_USER_SET_FEATURES] = VHOST_SET_FEATURES,
	[VHOST_USER_GET_FEATURES] = VHOST_GET_FEATURES,
	[VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
	[VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM,
	[VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE,
	[VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
	[VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
	[VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
	[VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
};
712bfe3f2eSlogwang
72d30ea906Sjfb8856606 static int
add_memseg_list(const struct rte_memseg_list * msl,void * arg)73d30ea906Sjfb8856606 add_memseg_list(const struct rte_memseg_list *msl, void *arg)
74d30ea906Sjfb8856606 {
75d30ea906Sjfb8856606 struct vhost_memory_kernel *vm = arg;
76d30ea906Sjfb8856606 struct vhost_memory_region *mr;
77d30ea906Sjfb8856606 void *start_addr;
78d30ea906Sjfb8856606 uint64_t len;
79d30ea906Sjfb8856606
80d30ea906Sjfb8856606 if (msl->external)
81d30ea906Sjfb8856606 return 0;
82d30ea906Sjfb8856606
83d30ea906Sjfb8856606 if (vm->nregions >= max_regions)
84d30ea906Sjfb8856606 return -1;
85d30ea906Sjfb8856606
86d30ea906Sjfb8856606 start_addr = msl->base_va;
87d30ea906Sjfb8856606 len = msl->page_sz * msl->memseg_arr.len;
88d30ea906Sjfb8856606
89d30ea906Sjfb8856606 mr = &vm->regions[vm->nregions++];
90d30ea906Sjfb8856606
91d30ea906Sjfb8856606 mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
92d30ea906Sjfb8856606 mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
93d30ea906Sjfb8856606 mr->memory_size = len;
94d30ea906Sjfb8856606 mr->mmap_offset = 0; /* flags_padding */
95d30ea906Sjfb8856606
96d30ea906Sjfb8856606 PMD_DRV_LOG(DEBUG, "index=%u addr=%p len=%" PRIu64,
97d30ea906Sjfb8856606 vm->nregions - 1, start_addr, len);
98d30ea906Sjfb8856606
99d30ea906Sjfb8856606 return 0;
100d30ea906Sjfb8856606 }
101d30ea906Sjfb8856606
102d30ea906Sjfb8856606 /* By default, vhost kernel module allows 64 regions, but DPDK may
103d30ea906Sjfb8856606 * have much more memory regions. Below function will treat each
104d30ea906Sjfb8856606 * contiguous memory space reserved by DPDK as one region.
1052bfe3f2eSlogwang */
1062bfe3f2eSlogwang static struct vhost_memory_kernel *
prepare_vhost_memory_kernel(void)1072bfe3f2eSlogwang prepare_vhost_memory_kernel(void)
1082bfe3f2eSlogwang {
1092bfe3f2eSlogwang struct vhost_memory_kernel *vm;
1102bfe3f2eSlogwang
1112bfe3f2eSlogwang vm = malloc(sizeof(struct vhost_memory_kernel) +
1122bfe3f2eSlogwang max_regions *
1132bfe3f2eSlogwang sizeof(struct vhost_memory_region));
1142bfe3f2eSlogwang if (!vm)
1152bfe3f2eSlogwang return NULL;
1162bfe3f2eSlogwang
117d30ea906Sjfb8856606 vm->nregions = 0;
118d30ea906Sjfb8856606 vm->padding = 0;
1192bfe3f2eSlogwang
120d30ea906Sjfb8856606 /*
121d30ea906Sjfb8856606 * The memory lock has already been taken by memory subsystem
122d30ea906Sjfb8856606 * or virtio_user_start_device().
123d30ea906Sjfb8856606 */
124d30ea906Sjfb8856606 if (rte_memseg_list_walk_thread_unsafe(add_memseg_list, vm) < 0) {
1252bfe3f2eSlogwang free(vm);
1262bfe3f2eSlogwang return NULL;
1272bfe3f2eSlogwang }
1282bfe3f2eSlogwang
1292bfe3f2eSlogwang return vm;
1302bfe3f2eSlogwang }
1312bfe3f2eSlogwang
/* with below features, vhost kernel does not need to do the checksum and TSO,
 * these info will be passed to virtio_user through virtio net header.
 * (Guest-side offloads: the peer, not vhost, handles csum/TSO on RX.)
 */
#define VHOST_KERNEL_GUEST_OFFLOADS_MASK	\
	((1ULL << VIRTIO_NET_F_GUEST_CSUM) |	\
	 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |	\
	 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |	\
	 (1ULL << VIRTIO_NET_F_GUEST_ECN)  |	\
	 (1ULL << VIRTIO_NET_F_GUEST_UFO))

/* with below features, when flows from virtio_user to vhost kernel
 * (1) if flows goes up through the kernel networking stack, it does not need
 * to verify checksum, which can save CPU cycles;
 * (2) if flows goes through a Linux bridge and outside from an interface
 * (kernel driver), checksum and TSO will be done by GSO in kernel or even
 * offloaded into real physical device.
 */
#define VHOST_KERNEL_HOST_OFFLOADS_MASK		\
	((1ULL << VIRTIO_NET_F_HOST_TSO4) |	\
	 (1ULL << VIRTIO_NET_F_HOST_TSO6) |	\
	 (1ULL << VIRTIO_NET_F_CSUM))
1532bfe3f2eSlogwang
154d30ea906Sjfb8856606 static unsigned int
tap_support_features(void)155d30ea906Sjfb8856606 tap_support_features(void)
1562bfe3f2eSlogwang {
1572bfe3f2eSlogwang int tapfd;
1582bfe3f2eSlogwang unsigned int tap_features;
1592bfe3f2eSlogwang
1602bfe3f2eSlogwang tapfd = open(PATH_NET_TUN, O_RDWR);
1612bfe3f2eSlogwang if (tapfd < 0) {
1622bfe3f2eSlogwang PMD_DRV_LOG(ERR, "fail to open %s: %s",
1632bfe3f2eSlogwang PATH_NET_TUN, strerror(errno));
1642bfe3f2eSlogwang return -1;
1652bfe3f2eSlogwang }
1662bfe3f2eSlogwang
1672bfe3f2eSlogwang if (ioctl(tapfd, TUNGETFEATURES, &tap_features) == -1) {
1682bfe3f2eSlogwang PMD_DRV_LOG(ERR, "TUNGETFEATURES failed: %s", strerror(errno));
1692bfe3f2eSlogwang close(tapfd);
1702bfe3f2eSlogwang return -1;
1712bfe3f2eSlogwang }
1722bfe3f2eSlogwang
1732bfe3f2eSlogwang close(tapfd);
174d30ea906Sjfb8856606 return tap_features;
1752bfe3f2eSlogwang }
1762bfe3f2eSlogwang
/*
 * Translate a vhost-user request into the corresponding vhost kernel
 * ioctl and issue it.
 *
 * Per-vring requests are routed to the single vhost fd owning that
 * queue pair; all other requests are broadcast to every open vhost fd.
 * For SET_MEM_TABLE the caller-provided arg is replaced by a freshly
 * built region table; for SET/GET_FEATURES the feature bits are
 * adjusted in place (so arg is modified for the caller).
 *
 * @return 0 on success, negative on failure (errno is logged).
 */
static int
vhost_kernel_ioctl(struct virtio_user_dev *dev,
		   enum vhost_user_request req,
		   void *arg)
{
	int ret = -1;
	unsigned int i;
	uint64_t req_kernel;
	struct vhost_memory_kernel *vm = NULL;
	int vhostfd;
	unsigned int queue_sel;
	unsigned int features;

	PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);

	/* req must be one of the keys of vhost_req_user_to_kernel;
	 * anything else maps to 0 and would issue a bogus ioctl.
	 */
	req_kernel = vhost_req_user_to_kernel[req];

	if (req_kernel == VHOST_SET_MEM_TABLE) {
		/* Build the region table ourselves; freed at the end. */
		vm = prepare_vhost_memory_kernel();
		if (!vm)
			return -1;
		arg = (void *)vm;
	}

	if (req_kernel == VHOST_SET_FEATURES) {
		/* We don't need memory protection here */
		*(uint64_t *)arg &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);

		/* VHOST kernel does not know about below flags */
		*(uint64_t *)arg &= ~VHOST_KERNEL_GUEST_OFFLOADS_MASK;
		*(uint64_t *)arg &= ~VHOST_KERNEL_HOST_OFFLOADS_MASK;

		*(uint64_t *)arg &= ~(1ULL << VIRTIO_NET_F_MQ);
	}

	switch (req_kernel) {
	case VHOST_SET_VRING_NUM:
	case VHOST_SET_VRING_ADDR:
	case VHOST_SET_VRING_BASE:
	case VHOST_GET_VRING_BASE:
	case VHOST_SET_VRING_KICK:
	case VHOST_SET_VRING_CALL:
		/* Each vhost-net fd serves one queue pair (RX+TX), so
		 * the global vring index is remapped: fd = index / 2,
		 * ring within that fd = index % 2. Note arg is
		 * rewritten in place before the ioctl.
		 */
		queue_sel = *(unsigned int *)arg;
		vhostfd = dev->vhostfds[queue_sel / 2];
		*(unsigned int *)arg = queue_sel % 2;
		PMD_DRV_LOG(DEBUG, "vhostfd=%d, index=%u",
			    vhostfd, *(unsigned int *)arg);
		break;
	default:
		/* -1 means "broadcast to all vhost fds" below. */
		vhostfd = -1;
	}
	if (vhostfd == -1) {
		for (i = 0; i < dev->max_queue_pairs; ++i) {
			if (dev->vhostfds[i] < 0)
				continue;

			ret = ioctl(dev->vhostfds[i], req_kernel, arg);
			if (ret < 0)
				break;
		}
	} else {
		ret = ioctl(vhostfd, req_kernel, arg);
	}

	if (!ret && req_kernel == VHOST_GET_FEATURES) {
		features = tap_support_features();
		/* with tap as the backend, all these features are supported
		 * but not claimed by vhost-net, so we add them back when
		 * reporting to upper layer.
		 */
		if (features & IFF_VNET_HDR) {
			*((uint64_t *)arg) |= VHOST_KERNEL_GUEST_OFFLOADS_MASK;
			*((uint64_t *)arg) |= VHOST_KERNEL_HOST_OFFLOADS_MASK;
		}

		/* vhost_kernel will not declare this feature, but it does
		 * support multi-queue.
		 */
		if (features & IFF_MULTI_QUEUE)
			*(uint64_t *)arg |= (1ull << VIRTIO_NET_F_MQ);
	}

	if (vm)
		free(vm);

	if (ret < 0)
		PMD_DRV_LOG(ERR, "%s failed: %s",
			    vhost_msg_strings[req], strerror(errno));

	return ret;
}
2682bfe3f2eSlogwang
2692bfe3f2eSlogwang /**
2702bfe3f2eSlogwang * Set up environment to talk with a vhost kernel backend.
2712bfe3f2eSlogwang *
2722bfe3f2eSlogwang * @return
2732bfe3f2eSlogwang * - (-1) if fail to set up;
2742bfe3f2eSlogwang * - (>=0) if successful.
2752bfe3f2eSlogwang */
2762bfe3f2eSlogwang static int
vhost_kernel_setup(struct virtio_user_dev * dev)2772bfe3f2eSlogwang vhost_kernel_setup(struct virtio_user_dev *dev)
2782bfe3f2eSlogwang {
2792bfe3f2eSlogwang int vhostfd;
2802bfe3f2eSlogwang uint32_t i;
2812bfe3f2eSlogwang
2822bfe3f2eSlogwang get_vhost_kernel_max_regions();
2832bfe3f2eSlogwang
2842bfe3f2eSlogwang for (i = 0; i < dev->max_queue_pairs; ++i) {
2852bfe3f2eSlogwang vhostfd = open(dev->path, O_RDWR);
2862bfe3f2eSlogwang if (vhostfd < 0) {
2872bfe3f2eSlogwang PMD_DRV_LOG(ERR, "fail to open %s, %s",
2882bfe3f2eSlogwang dev->path, strerror(errno));
2892bfe3f2eSlogwang return -1;
2902bfe3f2eSlogwang }
2912bfe3f2eSlogwang
2922bfe3f2eSlogwang dev->vhostfds[i] = vhostfd;
2932bfe3f2eSlogwang }
2942bfe3f2eSlogwang
2952bfe3f2eSlogwang return 0;
2962bfe3f2eSlogwang }
2972bfe3f2eSlogwang
2982bfe3f2eSlogwang static int
vhost_kernel_set_backend(int vhostfd,int tapfd)2992bfe3f2eSlogwang vhost_kernel_set_backend(int vhostfd, int tapfd)
3002bfe3f2eSlogwang {
3012bfe3f2eSlogwang struct vhost_vring_file f;
3022bfe3f2eSlogwang
3032bfe3f2eSlogwang f.fd = tapfd;
3042bfe3f2eSlogwang f.index = 0;
3052bfe3f2eSlogwang if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &f) < 0) {
3062bfe3f2eSlogwang PMD_DRV_LOG(ERR, "VHOST_NET_SET_BACKEND fails, %s",
3072bfe3f2eSlogwang strerror(errno));
3082bfe3f2eSlogwang return -1;
3092bfe3f2eSlogwang }
3102bfe3f2eSlogwang
3112bfe3f2eSlogwang f.index = 1;
3122bfe3f2eSlogwang if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &f) < 0) {
3132bfe3f2eSlogwang PMD_DRV_LOG(ERR, "VHOST_NET_SET_BACKEND fails, %s",
3142bfe3f2eSlogwang strerror(errno));
3152bfe3f2eSlogwang return -1;
3162bfe3f2eSlogwang }
3172bfe3f2eSlogwang
3182bfe3f2eSlogwang return 0;
3192bfe3f2eSlogwang }
3202bfe3f2eSlogwang
/*
 * Enable or disable one queue pair of the vhost kernel backend.
 *
 * Enable: reuse the existing tap fd for this pair if one was already
 * opened (re-enabling after a disable), otherwise open a new tap with
 * the proper vnet header size, then attach it as the vhost backend.
 * Disable: detach the backend (tapfd -1) and, in multi-queue mode,
 * drop the tap queue. Idempotent: returns 0 if already in the
 * requested state.
 *
 * @return 0 on success, -1 on failure.
 */
static int
vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
			       uint16_t pair_idx,
			       int enable)
{
	int hdr_size;
	int vhostfd;
	int tapfd;
	/* Multi-queue tap handling is only needed with >1 queue pair. */
	int req_mq = (dev->max_queue_pairs > 1);

	vhostfd = dev->vhostfds[pair_idx];

	/* Already in the requested state: nothing to do. */
	if (dev->qp_enabled[pair_idx] == enable)
		return 0;

	if (!enable) {
		tapfd = dev->tapfds[pair_idx];
		/* Detach the tap backend from both rings. */
		if (vhost_kernel_set_backend(vhostfd, -1) < 0) {
			PMD_DRV_LOG(ERR, "fail to set backend for vhost kernel");
			return -1;
		}
		if (req_mq && vhost_kernel_tap_set_queue(tapfd, false) < 0) {
			PMD_DRV_LOG(ERR, "fail to disable tap for vhost kernel");
			return -1;
		}
		dev->qp_enabled[pair_idx] = false;
		return 0;
	}

	/* Re-enable path: a tap fd already exists for this pair. */
	if (dev->tapfds[pair_idx] >= 0) {
		tapfd = dev->tapfds[pair_idx];
		if (vhost_kernel_tap_set_offload(tapfd, dev->features) == -1)
			return -1;
		if (req_mq && vhost_kernel_tap_set_queue(tapfd, true) < 0) {
			PMD_DRV_LOG(ERR, "fail to enable tap for vhost kernel");
			return -1;
		}
		goto set_backend;
	}

	/* First enable: pick the vnet header size matching the
	 * negotiated features (mergeable rxbuf / virtio 1.0 use the
	 * larger header).
	 */
	if ((dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ||
	    (dev->features & (1ULL << VIRTIO_F_VERSION_1)))
		hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hdr_size = sizeof(struct virtio_net_hdr);

	tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq,
			(char *)dev->mac_addr, dev->features);
	if (tapfd < 0) {
		PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel");
		return -1;
	}

	/* Remember the fd so later disable/enable cycles reuse it. */
	dev->tapfds[pair_idx] = tapfd;

set_backend:
	if (vhost_kernel_set_backend(vhostfd, tapfd) < 0) {
		PMD_DRV_LOG(ERR, "fail to set backend for vhost kernel");
		return -1;
	}

	dev->qp_enabled[pair_idx] = true;
	return 0;
}
3852bfe3f2eSlogwang
/* Backend ops vtable exported for the vhost-kernel flavour of
 * virtio_user; selected by the virtio_user PMD at device setup.
 */
struct virtio_user_backend_ops virtio_ops_kernel = {
	.setup = vhost_kernel_setup,
	.send_request = vhost_kernel_ioctl,
	.enable_qp = vhost_kernel_enable_queue_pair
};
391