/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"

#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"

const char * const virtio_user_backend_strings[] = {
	[VIRTIO_USER_BACKEND_UNKNOWN] = "VIRTIO_USER_BACKEND_UNKNOWN",
	[VIRTIO_USER_BACKEND_VHOST_USER] = "VHOST_USER",
	[VIRTIO_USER_BACKEND_VHOST_KERNEL] = "VHOST_NET",
	[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};

static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	/* Of all per-virtqueue messages, make sure VHOST_SET_VRING_CALL comes
	 * first, because vhost depends on this message to allocate the
	 * virtqueue pair.
	 */
	struct vhost_vring_file file;

	file.index = queue_sel;
	file.fd = dev->callfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);

	return 0;
}

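/* Configure a single virtqueue in the backend: ring size, base index,
 * descriptor/avail/used (or packed) ring addresses, and finally the kick fd.
 */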
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
	struct vhost_vring_file file;
	struct vhost_vring_state state;
	struct vring *vring = &dev->vrings[queue_sel];
	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
	struct vhost_vring_addr addr = {
		.index = queue_sel,
		.log_guest_addr = 0,
		.flags = 0, /* disable log */
	};

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		addr.desc_user_addr =
			(uint64_t)(uintptr_t)pq_vring->desc;
		addr.avail_user_addr =
			(uint64_t)(uintptr_t)pq_vring->driver;
		addr.used_user_addr =
			(uint64_t)(uintptr_t)pq_vring->device;
	} else {
		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
	}

	state.index = queue_sel;
	state.num = vring->num;
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);

	state.index = queue_sel;
	state.num = 0; /* no reservation */
	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
		state.num |= (1 << 15);
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);

	dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);

	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
	 * comes last, because vhost depends on this message to judge if
	 * virtio is ready.
	 */
	file.index = queue_sel;
	file.fd = dev->kickfds[queue_sel];
	dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);

	return 0;
}

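/* Apply the given per-queue callback to every Rx queue first, then to
 * every Tx queue.
 */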
static int
virtio_user_queue_setup(struct virtio_user_dev *dev,
			int (*fn)(struct virtio_user_dev *, uint32_t))
{
	uint32_t i, queue_sel;

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
			return -1;
		}
	}
	for (i = 0; i < dev->max_queue_pairs; ++i) {
		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
		if (fn(dev, queue_sel) < 0) {
			PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
			return -1;
		}
	}

	return 0;
}

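/* Negotiate features with the backend: ask vhost to create the queues,
 * strip the frontend-only feature bits and send VHOST_USER_SET_FEATURES.
 */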
int
virtio_user_dev_set_features(struct virtio_user_dev *dev)
{
	uint64_t features;
	int ret = -1;

	pthread_mutex_lock(&dev->mutex);

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
			dev->vhostfd < 0)
		goto error;

	/* Step 0: tell vhost to create queues */
	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
		goto error;

	features = dev->features;

	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
	features &= ~(1ull << VIRTIO_NET_F_MAC);
	/* Strip VIRTIO_NET_F_CTRL_VQ, as the device does not need to know about it */
	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
	features &= ~(1ull << VIRTIO_NET_F_STATUS);
	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
	if (ret < 0)
		goto error;
	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
error:
	pthread_mutex_unlock(&dev->mutex);

	return ret;
}

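/* Bring the device up: share the memory regions with the backend, kick all
 * queues and enable the first queue pair.
 */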
int
virtio_user_start_device(struct virtio_user_dev *dev)
{
	int ret;

	/*
	 * XXX workaround!
	 *
	 * We need to make sure that the locks will be
	 * taken in the correct order to avoid deadlocks.
	 *
	 * Before releasing this lock, this thread should
	 * not trigger any memory hotplug events.
	 *
	 * This is a temporary workaround, and should be
	 * replaced when we get proper support from the
	 * memory subsystem in the future.
	 */
	rte_mcfg_mem_read_lock();
	pthread_mutex_lock(&dev->mutex);

	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER &&
			dev->vhostfd < 0)
		goto error;

	/* Step 2: share memory regions */
	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
	if (ret < 0)
		goto error;

	/* Step 3: kick queues */
	if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
		goto error;

	/* Step 4: enable queues
	 * we enable the first queue pair by default.
	 */
	dev->ops->enable_qp(dev, 0, 1);

	dev->started = true;
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();

	return 0;
error:
	pthread_mutex_unlock(&dev->mutex);
	rte_mcfg_mem_read_unlock();
	/* TODO: free resource here or caller to check */
	return -1;
}

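/* Disable all queue pairs and fetch the vring bases to stop the backend. */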
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
	struct vhost_vring_state state;
	uint32_t i;
	int error = 0;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->started)
		goto out;

	for (i = 0; i < dev->max_queue_pairs; ++i)
		dev->ops->enable_qp(dev, i, 0);

	/* Stop the backend. */
	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		state.index = i;
		if (dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE,
					   &state) < 0) {
			PMD_DRV_LOG(ERR, "get_vring_base failed, index=%u\n",
				    i);
			error = -1;
			goto out;
		}
	}

	dev->started = false;
out:
	pthread_mutex_unlock(&dev->mutex);

	return error;
}

static inline void
parse_mac(struct virtio_user_dev *dev, const char *mac)
{
	struct rte_ether_addr tmp;

	if (!mac)
		return;

	if (rte_ether_unformat_addr(mac, &tmp) == 0) {
		memcpy(dev->mac_addr, &tmp, RTE_ETHER_ADDR_LEN);
		dev->mac_specified = 1;
	} else {
		/* ignore a malformed MAC; a random MAC will be used instead */
		PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
	}
}

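/* Allocate one callfd/kickfd eventfd pair per virtqueue; unused slots are set
 * to -1. On failure, the already-opened fds are closed and -1 is returned.
 */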
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
	uint32_t i, j;
	int callfd;
	int kickfd;

	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
		if (i >= dev->max_queue_pairs * 2) {
			dev->kickfds[i] = -1;
			dev->callfds[i] = -1;
			continue;
		}

		/* We could use an invalid flag here, but some backends use
		 * kickfd and callfd as the criteria to judge whether the
		 * device is alive, so use real eventfds in the end.
		 */
		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (callfd < 0) {
			PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
			break;
		}
		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
		if (kickfd < 0) {
			PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
			break;
		}
		dev->callfds[i] = callfd;
		dev->kickfds[i] = kickfd;
	}

	if (i < VIRTIO_MAX_VIRTQUEUES) {
		for (j = 0; j <= i; ++j) {
			close(dev->callfds[j]);
			close(dev->kickfds[j]);
		}

		return -1;
	}

	return 0;
}

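/* Populate the ethdev interrupt handle with the per-queue call fds and the
 * backend fd (or the listen fd in server mode).
 */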
static int
virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
	uint32_t i;
	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];

	if (!eth_dev->intr_handle) {
		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
		if (!eth_dev->intr_handle) {
			PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
			return -1;
		}
		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
	}

	for (i = 0; i < dev->max_queue_pairs; ++i)
		eth_dev->intr_handle->efds[i] = dev->callfds[i];
	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
	/* For virtio vdev, no need to read counter for clean */
	eth_dev->intr_handle->efd_counter_size = 0;
	eth_dev->intr_handle->fd = -1;
	if (dev->vhostfd >= 0)
		eth_dev->intr_handle->fd = dev->vhostfd;
	else if (dev->is_server)
		eth_dev->intr_handle->fd = dev->listenfd;

	return 0;
}

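/* Memory hotplug callback: pause the active queues, resend the memory table
 * to the backend, then resume the queues.
 */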
static void
virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
			 const void *addr,
			 size_t len __rte_unused,
			 void *arg)
{
	struct virtio_user_dev *dev = arg;
	struct rte_memseg_list *msl;
	uint16_t i;

	/* ignore externally allocated memory */
	msl = rte_mem_virt2memseg_list(addr);
	if (msl->external)
		return;

	pthread_mutex_lock(&dev->mutex);

	if (dev->started == false)
		goto exit;

	/* Step 1: pause the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 0);

	/* Step 2: update memory regions */
	dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);

	/* Step 3: resume the active queues */
	for (i = 0; i < dev->queue_pairs; i++)
		dev->ops->enable_qp(dev, i, 1);

exit:
	pthread_mutex_unlock(&dev->mutex);
}

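/* Select the backend ops according to the backend type, then set up the
 * backend connection, the notification fds and the interrupt handle.
 */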
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
	uint32_t q;

	dev->vhostfd = -1;
	dev->vhostfds = NULL;
	dev->tapfds = NULL;

	if (dev->is_server) {
		if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER) {
			PMD_DRV_LOG(ERR, "Server mode only supports vhost-user!");
			return -1;
		}
		dev->ops = &virtio_ops_user;
	} else {
		if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
			dev->ops = &virtio_ops_user;
		} else if (dev->backend_type ==
					VIRTIO_USER_BACKEND_VHOST_KERNEL) {
			dev->ops = &virtio_ops_kernel;

			dev->vhostfds = malloc(dev->max_queue_pairs *
					       sizeof(int));
			dev->tapfds = malloc(dev->max_queue_pairs *
					     sizeof(int));
			if (!dev->vhostfds || !dev->tapfds) {
				PMD_INIT_LOG(ERR, "Failed to malloc");
				return -1;
			}

			for (q = 0; q < dev->max_queue_pairs; ++q) {
				dev->vhostfds[q] = -1;
				dev->tapfds[q] = -1;
			}
		} else if (dev->backend_type ==
					VIRTIO_USER_BACKEND_VHOST_VDPA) {
			dev->ops = &virtio_ops_vdpa;
		} else {
			PMD_DRV_LOG(ERR, "Unknown backend type");
			return -1;
		}
	}

	if (dev->ops->setup(dev) < 0)
		return -1;

	if (virtio_user_dev_init_notify(dev) < 0)
		return -1;

	if (virtio_user_fill_intr_handle(dev) < 0)
		return -1;

	return 0;
}

/* Use the macro below to filter features from the vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES			\
	(1ULL << VIRTIO_NET_F_MAC		|	\
	 1ULL << VIRTIO_NET_F_STATUS		|	\
	 1ULL << VIRTIO_NET_F_MQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
	 1ULL << VIRTIO_NET_F_CSUM		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
	 1ULL << VIRTIO_F_IN_ORDER		|	\
	 1ULL << VIRTIO_F_VERSION_1		|	\
	 1ULL << VIRTIO_F_RING_PACKED		|	\
	 1ULL << VHOST_USER_F_PROTOCOL_FEATURES)

#define VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES		\
	(1ULL << VHOST_USER_PROTOCOL_F_MQ |		\
	 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |	\
	 1ULL << VHOST_USER_PROTOCOL_F_STATUS)

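/* Initialize the virtio_user device: record the arguments, set up the
 * backend, negotiate protocol features (client mode only) and compute the
 * advertised device feature set.
 */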
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,
		     int server, int mrg_rxbuf, int in_order, int packed_vq,
		     enum virtio_user_backend_type backend_type)
{
	uint64_t protocol_features = 0;

	pthread_mutex_init(&dev->mutex, NULL);
	strlcpy(dev->path, path, PATH_MAX);
	dev->started = 0;
	dev->max_queue_pairs = queues;
	dev->queue_pairs = 1; /* mq disabled by default */
	dev->queue_size = queue_size;
	dev->is_server = server;
	dev->mac_specified = 0;
	dev->frontend_features = 0;
	dev->unsupported_features = ~VIRTIO_USER_SUPPORTED_FEATURES;
	dev->protocol_features = VIRTIO_USER_SUPPORTED_PROTOCOL_FEATURES;
	dev->backend_type = backend_type;

	parse_mac(dev, mac);

	if (*ifname) {
		dev->ifname = *ifname;
		*ifname = NULL;
	}

	if (virtio_user_dev_setup(dev) < 0) {
		PMD_INIT_LOG(ERR, "backend set up fails");
		return -1;
	}

	if (dev->backend_type != VIRTIO_USER_BACKEND_VHOST_USER)
		dev->unsupported_features |=
			(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);

	if (!dev->is_server) {
		if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
					   NULL) < 0) {
			PMD_INIT_LOG(ERR, "set_owner fails: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
					   &dev->device_features) < 0) {
			PMD_INIT_LOG(ERR, "get_features failed: %s",
				     strerror(errno));
			return -1;
		}

		if (dev->device_features &
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) {
			if (dev->ops->send_request(dev,
					VHOST_USER_GET_PROTOCOL_FEATURES,
					&protocol_features))
				return -1;

			dev->protocol_features &= protocol_features;

			if (dev->ops->send_request(dev,
					VHOST_USER_SET_PROTOCOL_FEATURES,
					&dev->protocol_features))
				return -1;

			if (!(dev->protocol_features &
					(1ULL << VHOST_USER_PROTOCOL_F_MQ)))
				dev->unsupported_features |=
					(1ull << VIRTIO_NET_F_MQ);
		}
	} else {
		/* We just pretend that vhost-user supports all of these
		 * features. Note that this could be problematic if a feature
		 * is negotiated but not actually supported by the vhost-user
		 * backend that connects later.
		 */
		dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;

		/* We cannot assume VHOST_USER_PROTOCOL_F_STATUS is supported
		 * until it's negotiated
		 */
		dev->protocol_features &=
			~(1ULL << VHOST_USER_PROTOCOL_F_STATUS);
	}

	if (!mrg_rxbuf)
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);

	if (!in_order)
		dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);

	if (!packed_vq)
		dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);

	if (dev->mac_specified)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
	else
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);

	if (cq) {
		/* device does not really need to know anything about CQ,
		 * so if necessary, we just claim to support CQ
		 */
		dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
	} else {
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
		/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
		dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
		dev->unsupported_features |=
			(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
	}

	/* The backend will not report this feature, so we add it explicitly */
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		dev->frontend_features |= (1ull << VIRTIO_NET_F_STATUS);

	/*
	 * Device features =
	 *     (frontend_features | backend_features) & ~unsupported_features;
	 */
	dev->device_features |= dev->frontend_features;
	dev->device_features &= ~dev->unsupported_features;

	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
				virtio_user_mem_event_cb, dev)) {
		if (rte_errno != ENOTSUP) {
			PMD_INIT_LOG(ERR, "Failed to register mem event"
					" callback\n");
			return -1;
		}
	}

	return 0;
}

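/* Release all resources acquired in virtio_user_dev_init(). */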
void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
	uint32_t i;

	virtio_user_stop_device(dev);

	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);

	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
		close(dev->callfds[i]);
		close(dev->kickfds[i]);
	}

	if (dev->vhostfd >= 0)
		close(dev->vhostfd);

	if (dev->is_server && dev->listenfd >= 0) {
		close(dev->listenfd);
		dev->listenfd = -1;
	}

	if (dev->vhostfds) {
		for (i = 0; i < dev->max_queue_pairs; ++i) {
			close(dev->vhostfds[i]);
			if (dev->tapfds[i] >= 0)
				close(dev->tapfds[i]);
		}
		free(dev->vhostfds);
		free(dev->tapfds);
	}

	free(dev->ifname);

	if (dev->is_server)
		unlink(dev->path);
}

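/* Handle a VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET request: enable the first q_pairs
 * queue pairs and disable the rest.
 */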
uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
	uint16_t i;
	uint8_t ret = 0;

	if (q_pairs > dev->max_queue_pairs) {
		PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
			     q_pairs, dev->max_queue_pairs);
		return -1;
	}

	/* Server mode can't enable queue pairs if vhostfd is invalid,
	 * always return 0 in this case.
	 */
	if (!dev->is_server || dev->vhostfd >= 0) {
		for (i = 0; i < q_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 1);
		for (i = q_pairs; i < dev->max_queue_pairs; ++i)
			ret |= dev->ops->enable_qp(dev, i, 0);
	}
	dev->queue_pairs = q_pairs;

	return ret;
}

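/* Process one control message from a split control virtqueue and return the
 * number of descriptors consumed.
 */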
static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}

static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}

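/* Process one control message from a packed control virtqueue and return the
 * number of descriptors consumed.
 */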
static uint32_t
virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
				   struct vring_packed *vring,
				   uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0;
	uint16_t idx_data, idx_status;
	/* initialize to one, header is first */
	uint32_t n_descs = 1;

	/* locate desc for header, data, and status */
	idx_data = idx_hdr + 1;
	if (idx_data >= dev->queue_size)
		idx_data -= dev->queue_size;

	n_descs++;

	idx_status = idx_data;
	while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
		idx_status++;
		if (idx_status >= dev->queue_size)
			idx_status -= dev->queue_size;
		n_descs++;
	}

	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)
				vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
		   hdr->class == VIRTIO_NET_CTRL_MAC ||
		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
		status = 0;
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)
		vring->desc[idx_status].addr = status;

	/* Update used descriptor */
	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
	vring->desc[idx_hdr].len = sizeof(status);

	return n_descs;
}

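/* Drain the packed control virtqueue, marking processed descriptors as used. */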
void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
	uint16_t n_descs, flags;

	/* Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	while (desc_is_avail(&vring->desc[vq->used_idx],
			     vq->used_wrap_counter)) {

		n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
				vq->used_idx);

		flags = VRING_DESC_F_WRITE;
		if (vq->used_wrap_counter)
			flags |= VRING_PACKED_DESC_F_AVAIL_USED;

		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
				 __ATOMIC_RELEASE);

		vq->used_idx += n_descs;
		if (vq->used_idx >= dev->queue_size) {
			vq->used_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}
}

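/* Drain the split control virtqueue, updating the used ring as messages are
 * processed.
 */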
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
	       != vring->avail->idx) {
		avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
			    & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = desc_idx;
		uep->len = n_descs;

		__atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
	}
}

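/* Forward the virtio device status to the backend via VHOST_USER_SET_STATUS;
 * the vhost-user path passes a 64-bit argument, vhost-vdpa an 8-bit one.
 */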
int
virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	int ret;
	uint64_t arg = status;

	pthread_mutex_lock(&dev->mutex);
	dev->status = status;
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER)
		ret = dev->ops->send_request(dev,
				VHOST_USER_SET_STATUS, &arg);
	else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA)
		ret = dev->ops->send_request(dev,
				VHOST_USER_SET_STATUS, &status);
	else
		ret = -ENOTSUP;

	if (ret && ret != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "VHOST_USER_SET_STATUS failed (%d): %s", ret,
			     strerror(errno));
	}

	pthread_mutex_unlock(&dev->mutex);
	return ret;
}

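/* Query the device status from the backend and cache it in dev->status. */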
int
virtio_user_dev_update_status(struct virtio_user_dev *dev)
{
	uint64_t ret;
	uint8_t status;
	int err;

	pthread_mutex_lock(&dev->mutex);
	if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_USER) {
		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS, &ret);
		if (!err && ret > UINT8_MAX) {
			PMD_INIT_LOG(ERR, "Invalid VHOST_USER_GET_STATUS "
					"response 0x%" PRIx64 "\n", ret);
			err = -1;
			goto error;
		}

		status = ret;
	} else if (dev->backend_type == VIRTIO_USER_BACKEND_VHOST_VDPA) {
		err = dev->ops->send_request(dev, VHOST_USER_GET_STATUS,
					     &status);
	} else {
		err = -ENOTSUP;
	}

	if (!err) {
		dev->status = status;
		PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
			"\t-RESET: %u\n"
			"\t-ACKNOWLEDGE: %u\n"
			"\t-DRIVER: %u\n"
			"\t-DRIVER_OK: %u\n"
			"\t-FEATURES_OK: %u\n"
			"\t-DEVICE_NEED_RESET: %u\n"
			"\t-FAILED: %u\n",
			dev->status,
			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
	} else if (err != -ENOTSUP) {
		PMD_INIT_LOG(ERR, "VHOST_USER_GET_STATUS failed (%d): %s", err,
			     strerror(errno));
	}

error:
	pthread_mutex_unlock(&dev->mutex);
	return err;
}