15566a3e3SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
25566a3e3SBruce Richardson * Copyright(c) 2010-2016 Intel Corporation
36c3169a3SBruce Richardson */
46c3169a3SBruce Richardson
56c3169a3SBruce Richardson #include <stdint.h>
66c3169a3SBruce Richardson #include <string.h>
76c3169a3SBruce Richardson #include <stdio.h>
86c3169a3SBruce Richardson #include <errno.h>
96c3169a3SBruce Richardson #include <unistd.h>
106c3169a3SBruce Richardson
11df96fd0dSBruce Richardson #include <ethdev_driver.h>
126c3169a3SBruce Richardson #include <rte_memcpy.h>
136c3169a3SBruce Richardson #include <rte_string_fns.h>
146c3169a3SBruce Richardson #include <rte_memzone.h>
156c3169a3SBruce Richardson #include <rte_malloc.h>
166c3169a3SBruce Richardson #include <rte_branch_prediction.h>
176c3169a3SBruce Richardson #include <rte_ether.h>
187365504fSXiao Wang #include <rte_ip.h>
197365504fSXiao Wang #include <rte_arp.h>
206c3169a3SBruce Richardson #include <rte_common.h>
21abf4c84bSBernard Iremonger #include <rte_errno.h>
224819eae8SOlivier Matz #include <rte_cpuflags.h>
237566f28aSCiara Power #include <rte_vect.h>
246c3169a3SBruce Richardson #include <rte_memory.h>
25924e6b76SThomas Monjalon #include <rte_eal_paging.h>
266c3169a3SBruce Richardson #include <rte_eal.h>
276c3169a3SBruce Richardson #include <rte_dev.h>
281978a9dcSXiao Wang #include <rte_cycles.h>
29440f03c2SXiao Wang #include <rte_kvargs.h>
306c3169a3SBruce Richardson
316c3169a3SBruce Richardson #include "virtio_ethdev.h"
32b5ba7ee4SMaxime Coquelin #include "virtio.h"
336c3169a3SBruce Richardson #include "virtio_logs.h"
346c3169a3SBruce Richardson #include "virtqueue.h"
35cab04612SHuawei Xie #include "virtio_rxtx.h"
3631136836SIvan Ilchenko #include "virtio_rxtx_simple.h"
377f468b2eSTiwei Bie #include "virtio_user/virtio_user_dev.h"
386c3169a3SBruce Richardson
396c3169a3SBruce Richardson static int virtio_dev_configure(struct rte_eth_dev *dev);
406c3169a3SBruce Richardson static int virtio_dev_start(struct rte_eth_dev *dev);
419039c812SAndrew Rybchenko static int virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
429039c812SAndrew Rybchenko static int virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
43ca041cd4SIvan Ilchenko static int virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
44ca041cd4SIvan Ilchenko static int virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
4549119e38SIvan Dyukov static uint32_t virtio_dev_speed_capa_get(uint32_t speed);
4649119e38SIvan Dyukov static int virtio_dev_devargs_parse(struct rte_devargs *devargs,
474710e16aSMarvin Liu uint32_t *speed,
484710e16aSMarvin Liu int *vectorized);
49bdad90d1SIvan Ilchenko static int virtio_dev_info_get(struct rte_eth_dev *dev,
506c3169a3SBruce Richardson struct rte_eth_dev_info *dev_info);
516c3169a3SBruce Richardson static int virtio_dev_link_update(struct rte_eth_dev *dev,
52dd2c630aSFerruh Yigit int wait_to_complete);
53289ba0c0SDavid Harton static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
540c9d6620SMaxime Coquelin static int virtio_dev_rss_hash_update(struct rte_eth_dev *dev,
550c9d6620SMaxime Coquelin struct rte_eth_rss_conf *rss_conf);
560c9d6620SMaxime Coquelin static int virtio_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
570c9d6620SMaxime Coquelin struct rte_eth_rss_conf *rss_conf);
580c9d6620SMaxime Coquelin static int virtio_dev_rss_reta_update(struct rte_eth_dev *dev,
590c9d6620SMaxime Coquelin struct rte_eth_rss_reta_entry64 *reta_conf,
600c9d6620SMaxime Coquelin uint16_t reta_size);
610c9d6620SMaxime Coquelin static int virtio_dev_rss_reta_query(struct rte_eth_dev *dev,
620c9d6620SMaxime Coquelin struct rte_eth_rss_reta_entry64 *reta_conf,
630c9d6620SMaxime Coquelin uint16_t reta_size);
646c3169a3SBruce Richardson
656c3169a3SBruce Richardson static void virtio_set_hwaddr(struct virtio_hw *hw);
666c3169a3SBruce Richardson static void virtio_get_hwaddr(struct virtio_hw *hw);
676c3169a3SBruce Richardson
68d5b0924bSMatan Azrad static int virtio_dev_stats_get(struct rte_eth_dev *dev,
6976d4c652SHarry van Haaren struct rte_eth_stats *stats);
7076d4c652SHarry van Haaren static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
71e2aae1c1SRemy Horton struct rte_eth_xstat *xstats, unsigned n);
72baf91c39SRemy Horton static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
73baf91c39SRemy Horton struct rte_eth_xstat_name *xstats_names,
74baf91c39SRemy Horton unsigned limit);
759970a9adSIgor Romanov static int virtio_dev_stats_reset(struct rte_eth_dev *dev);
766c3169a3SBruce Richardson static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
776c3169a3SBruce Richardson static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
786c3169a3SBruce Richardson uint16_t vlan_id, int on);
796d01e580SWei Dai static int virtio_mac_addr_add(struct rte_eth_dev *dev,
806d13ea8eSOlivier Matz struct rte_ether_addr *mac_addr,
81dd2c630aSFerruh Yigit uint32_t index, uint32_t vmdq);
826c3169a3SBruce Richardson static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
83caccf8b3SOlivier Matz static int virtio_mac_addr_set(struct rte_eth_dev *dev,
846d13ea8eSOlivier Matz struct rte_ether_addr *mac_addr);
856c3169a3SBruce Richardson
86fe19d49cSZhiyong Yang static int virtio_intr_disable(struct rte_eth_dev *dev);
8764ac7e08SMiao Li static int virtio_get_monitor_addr(void *rx_queue,
8864ac7e08SMiao Li struct rte_power_monitor_cond *pmc);
89fe19d49cSZhiyong Yang
906c3169a3SBruce Richardson static int virtio_dev_queue_stats_mapping_set(
91dd2c630aSFerruh Yigit struct rte_eth_dev *eth_dev,
92dd2c630aSFerruh Yigit uint16_t queue_id,
93dd2c630aSFerruh Yigit uint8_t stat_idx,
94dd2c630aSFerruh Yigit uint8_t is_rx);
956c3169a3SBruce Richardson
967365504fSXiao Wang static void virtio_notify_peers(struct rte_eth_dev *dev);
977365504fSXiao Wang static void virtio_ack_link_announce(struct rte_eth_dev *dev);
987365504fSXiao Wang
9976d4c652SHarry van Haaren struct rte_virtio_xstats_name_off {
10076d4c652SHarry van Haaren char name[RTE_ETH_XSTATS_NAME_SIZE];
10176d4c652SHarry van Haaren unsigned offset;
10276d4c652SHarry van Haaren };
10376d4c652SHarry van Haaren
10476d4c652SHarry van Haaren /* [rt]x_qX_ is prepended to the name string here */
10501ad44fdSHuawei Xie static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
10601ad44fdSHuawei Xie {"good_packets", offsetof(struct virtnet_rx, stats.packets)},
10701ad44fdSHuawei Xie {"good_bytes", offsetof(struct virtnet_rx, stats.bytes)},
10801ad44fdSHuawei Xie {"errors", offsetof(struct virtnet_rx, stats.errors)},
10901ad44fdSHuawei Xie {"multicast_packets", offsetof(struct virtnet_rx, stats.multicast)},
11001ad44fdSHuawei Xie {"broadcast_packets", offsetof(struct virtnet_rx, stats.broadcast)},
11101ad44fdSHuawei Xie {"undersize_packets", offsetof(struct virtnet_rx, stats.size_bins[0])},
11201ad44fdSHuawei Xie {"size_64_packets", offsetof(struct virtnet_rx, stats.size_bins[1])},
11301ad44fdSHuawei Xie {"size_65_127_packets", offsetof(struct virtnet_rx, stats.size_bins[2])},
11401ad44fdSHuawei Xie {"size_128_255_packets", offsetof(struct virtnet_rx, stats.size_bins[3])},
11501ad44fdSHuawei Xie {"size_256_511_packets", offsetof(struct virtnet_rx, stats.size_bins[4])},
11601ad44fdSHuawei Xie {"size_512_1023_packets", offsetof(struct virtnet_rx, stats.size_bins[5])},
11781f7234bSZhiyong Yang {"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
11881f7234bSZhiyong Yang {"size_1519_max_packets", offsetof(struct virtnet_rx, stats.size_bins[7])},
11976d4c652SHarry van Haaren };
12076d4c652SHarry van Haaren
12101ad44fdSHuawei Xie /* [rt]x_qX_ is prepended to the name string here */
12201ad44fdSHuawei Xie static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
12301ad44fdSHuawei Xie {"good_packets", offsetof(struct virtnet_tx, stats.packets)},
12401ad44fdSHuawei Xie {"good_bytes", offsetof(struct virtnet_tx, stats.bytes)},
12501ad44fdSHuawei Xie {"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)},
12601ad44fdSHuawei Xie {"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)},
12701ad44fdSHuawei Xie {"undersize_packets", offsetof(struct virtnet_tx, stats.size_bins[0])},
12801ad44fdSHuawei Xie {"size_64_packets", offsetof(struct virtnet_tx, stats.size_bins[1])},
12901ad44fdSHuawei Xie {"size_65_127_packets", offsetof(struct virtnet_tx, stats.size_bins[2])},
13001ad44fdSHuawei Xie {"size_128_255_packets", offsetof(struct virtnet_tx, stats.size_bins[3])},
13101ad44fdSHuawei Xie {"size_256_511_packets", offsetof(struct virtnet_tx, stats.size_bins[4])},
13201ad44fdSHuawei Xie {"size_512_1023_packets", offsetof(struct virtnet_tx, stats.size_bins[5])},
13381f7234bSZhiyong Yang {"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
13481f7234bSZhiyong Yang {"size_1519_max_packets", offsetof(struct virtnet_tx, stats.size_bins[7])},
13501ad44fdSHuawei Xie };
13601ad44fdSHuawei Xie
13701ad44fdSHuawei Xie #define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
13801ad44fdSHuawei Xie sizeof(rte_virtio_rxq_stat_strings[0]))
13901ad44fdSHuawei Xie #define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
14001ad44fdSHuawei Xie sizeof(rte_virtio_txq_stat_strings[0]))
14176d4c652SHarry van Haaren
142553f4593SYuanhan Liu struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
143553f4593SYuanhan Liu
144ec194c2fSJens Freimann static struct virtio_pmd_ctrl *
virtio_send_command_packed(struct virtnet_ctl * cvq,struct virtio_pmd_ctrl * ctrl,int * dlen,int pkt_num)1452923b8f9STiwei Bie virtio_send_command_packed(struct virtnet_ctl *cvq,
1462923b8f9STiwei Bie struct virtio_pmd_ctrl *ctrl,
147ec194c2fSJens Freimann int *dlen, int pkt_num)
148ec194c2fSJens Freimann {
1493169550fSMaxime Coquelin struct virtqueue *vq = virtnet_cq_to_vq(cvq);
150ec194c2fSJens Freimann int head;
1514cdc4d98STiwei Bie struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
152ec194c2fSJens Freimann struct virtio_pmd_ctrl *result;
1538e148e49STiwei Bie uint16_t flags;
154ec194c2fSJens Freimann int sum = 0;
1552923b8f9STiwei Bie int nb_descs = 0;
156ec194c2fSJens Freimann int k;
157ec194c2fSJens Freimann
158ec194c2fSJens Freimann /*
159ec194c2fSJens Freimann * Format is enforced in qemu code:
160ec194c2fSJens Freimann * One TX packet for header;
161ec194c2fSJens Freimann * At least one TX packet per argument;
162ec194c2fSJens Freimann * One RX packet for ACK.
163ec194c2fSJens Freimann */
164ec194c2fSJens Freimann head = vq->vq_avail_idx;
165dfd33aa4STiwei Bie flags = vq->vq_packed.cached_flags;
166ec194c2fSJens Freimann desc[head].addr = cvq->virtio_net_hdr_mem;
167ec194c2fSJens Freimann desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
168ec194c2fSJens Freimann vq->vq_free_cnt--;
1692923b8f9STiwei Bie nb_descs++;
170ec194c2fSJens Freimann if (++vq->vq_avail_idx >= vq->vq_nentries) {
171ec194c2fSJens Freimann vq->vq_avail_idx -= vq->vq_nentries;
17212e9e70cSTiwei Bie vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
173ec194c2fSJens Freimann }
174ec194c2fSJens Freimann
175ec194c2fSJens Freimann for (k = 0; k < pkt_num; k++) {
176ec194c2fSJens Freimann desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
177ec194c2fSJens Freimann + sizeof(struct virtio_net_ctrl_hdr)
178ec194c2fSJens Freimann + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
179ec194c2fSJens Freimann desc[vq->vq_avail_idx].len = dlen[k];
1802923b8f9STiwei Bie desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
181dfd33aa4STiwei Bie vq->vq_packed.cached_flags;
182ec194c2fSJens Freimann sum += dlen[k];
183ec194c2fSJens Freimann vq->vq_free_cnt--;
1842923b8f9STiwei Bie nb_descs++;
185ec194c2fSJens Freimann if (++vq->vq_avail_idx >= vq->vq_nentries) {
186ec194c2fSJens Freimann vq->vq_avail_idx -= vq->vq_nentries;
187dfd33aa4STiwei Bie vq->vq_packed.cached_flags ^=
18812e9e70cSTiwei Bie VRING_PACKED_DESC_F_AVAIL_USED;
189ec194c2fSJens Freimann }
190ec194c2fSJens Freimann }
191ec194c2fSJens Freimann
192ec194c2fSJens Freimann desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
193ec194c2fSJens Freimann + sizeof(struct virtio_net_ctrl_hdr);
194ec194c2fSJens Freimann desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
195dfd33aa4STiwei Bie desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
196dfd33aa4STiwei Bie vq->vq_packed.cached_flags;
197ec194c2fSJens Freimann vq->vq_free_cnt--;
1982923b8f9STiwei Bie nb_descs++;
199ec194c2fSJens Freimann if (++vq->vq_avail_idx >= vq->vq_nentries) {
200ec194c2fSJens Freimann vq->vq_avail_idx -= vq->vq_nentries;
20112e9e70cSTiwei Bie vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
202ec194c2fSJens Freimann }
203ec194c2fSJens Freimann
204e51a474cSJoyce Kong virtqueue_store_flags_packed(&desc[head], VRING_DESC_F_NEXT | flags,
205e51a474cSJoyce Kong vq->hw->weak_barriers);
2062923b8f9STiwei Bie
2072923b8f9STiwei Bie virtio_wmb(vq->hw->weak_barriers);
208ec194c2fSJens Freimann virtqueue_notify(vq);
209ec194c2fSJens Freimann
210f875cbfdSJoyce Kong /* wait for used desc in virtqueue
211f875cbfdSJoyce Kong * desc_is_used has a load-acquire or rte_io_rmb inside
212f875cbfdSJoyce Kong */
2132923b8f9STiwei Bie while (!desc_is_used(&desc[head], vq))
214ec194c2fSJens Freimann usleep(100);
2152923b8f9STiwei Bie
216ec194c2fSJens Freimann /* now get used descriptors */
2172923b8f9STiwei Bie vq->vq_free_cnt += nb_descs;
2182923b8f9STiwei Bie vq->vq_used_cons_idx += nb_descs;
2192923b8f9STiwei Bie if (vq->vq_used_cons_idx >= vq->vq_nentries) {
220ec194c2fSJens Freimann vq->vq_used_cons_idx -= vq->vq_nentries;
221dfd33aa4STiwei Bie vq->vq_packed.used_wrap_counter ^= 1;
222ec194c2fSJens Freimann }
223ec194c2fSJens Freimann
22445c224e7STiwei Bie PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
22545c224e7STiwei Bie "vq->vq_avail_idx=%d\n"
22645c224e7STiwei Bie "vq->vq_used_cons_idx=%d\n"
227dfd33aa4STiwei Bie "vq->vq_packed.cached_flags=0x%x\n"
228f3854ebaSThomas Monjalon "vq->vq_packed.used_wrap_counter=%d",
22945c224e7STiwei Bie vq->vq_free_cnt,
23045c224e7STiwei Bie vq->vq_avail_idx,
23145c224e7STiwei Bie vq->vq_used_cons_idx,
232dfd33aa4STiwei Bie vq->vq_packed.cached_flags,
233dfd33aa4STiwei Bie vq->vq_packed.used_wrap_counter);
23445c224e7STiwei Bie
235ec194c2fSJens Freimann result = cvq->virtio_net_hdr_mz->addr;
236ec194c2fSJens Freimann return result;
237ec194c2fSJens Freimann }
238ec194c2fSJens Freimann
239d5e1ce79STiwei Bie static struct virtio_pmd_ctrl *
virtio_send_command_split(struct virtnet_ctl * cvq,struct virtio_pmd_ctrl * ctrl,int * dlen,int pkt_num)240d5e1ce79STiwei Bie virtio_send_command_split(struct virtnet_ctl *cvq,
241d5e1ce79STiwei Bie struct virtio_pmd_ctrl *ctrl,
2426c3169a3SBruce Richardson int *dlen, int pkt_num)
2436c3169a3SBruce Richardson {
244d5e1ce79STiwei Bie struct virtio_pmd_ctrl *result;
2453169550fSMaxime Coquelin struct virtqueue *vq = virtnet_cq_to_vq(cvq);
2469e71668bSDamjan Marion uint32_t head, i;
2476c3169a3SBruce Richardson int k, sum = 0;
2486c3169a3SBruce Richardson
2499e71668bSDamjan Marion head = vq->vq_desc_head_idx;
2506c3169a3SBruce Richardson
2516c3169a3SBruce Richardson /*
2526c3169a3SBruce Richardson * Format is enforced in qemu code:
2536c3169a3SBruce Richardson * One TX packet for header;
2546c3169a3SBruce Richardson * At least one TX packet per argument;
2556c3169a3SBruce Richardson * One RX packet for ACK.
2566c3169a3SBruce Richardson */
257dfd33aa4STiwei Bie vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
258dfd33aa4STiwei Bie vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
259dfd33aa4STiwei Bie vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
2606c3169a3SBruce Richardson vq->vq_free_cnt--;
261dfd33aa4STiwei Bie i = vq->vq_split.ring.desc[head].next;
2626c3169a3SBruce Richardson
2636c3169a3SBruce Richardson for (k = 0; k < pkt_num; k++) {
264dfd33aa4STiwei Bie vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
265dfd33aa4STiwei Bie vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
2666c3169a3SBruce Richardson + sizeof(struct virtio_net_ctrl_hdr)
2676c3169a3SBruce Richardson + sizeof(ctrl->status) + sizeof(uint8_t)*sum;
268dfd33aa4STiwei Bie vq->vq_split.ring.desc[i].len = dlen[k];
2696c3169a3SBruce Richardson sum += dlen[k];
2706c3169a3SBruce Richardson vq->vq_free_cnt--;
271dfd33aa4STiwei Bie i = vq->vq_split.ring.desc[i].next;
2726c3169a3SBruce Richardson }
2736c3169a3SBruce Richardson
274dfd33aa4STiwei Bie vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
275dfd33aa4STiwei Bie vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
2766c3169a3SBruce Richardson + sizeof(struct virtio_net_ctrl_hdr);
277dfd33aa4STiwei Bie vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
2786c3169a3SBruce Richardson vq->vq_free_cnt--;
2796c3169a3SBruce Richardson
280dfd33aa4STiwei Bie vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
2816c3169a3SBruce Richardson
2826c3169a3SBruce Richardson vq_update_avail_ring(vq, head);
2836c3169a3SBruce Richardson vq_update_avail_idx(vq);
2846c3169a3SBruce Richardson
2856c3169a3SBruce Richardson PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
2866c3169a3SBruce Richardson
2876c3169a3SBruce Richardson virtqueue_notify(vq);
2886c3169a3SBruce Richardson
289ea5207c1SJoyce Kong while (virtqueue_nused(vq) == 0)
2906c3169a3SBruce Richardson usleep(100);
2916c3169a3SBruce Richardson
292ea5207c1SJoyce Kong while (virtqueue_nused(vq)) {
2936c3169a3SBruce Richardson uint32_t idx, desc_idx, used_idx;
2946c3169a3SBruce Richardson struct vring_used_elem *uep;
2956c3169a3SBruce Richardson
2966c3169a3SBruce Richardson used_idx = (uint32_t)(vq->vq_used_cons_idx
2976c3169a3SBruce Richardson & (vq->vq_nentries - 1));
298dfd33aa4STiwei Bie uep = &vq->vq_split.ring.used->ring[used_idx];
2996c3169a3SBruce Richardson idx = (uint32_t) uep->id;
3006c3169a3SBruce Richardson desc_idx = idx;
3016c3169a3SBruce Richardson
302dfd33aa4STiwei Bie while (vq->vq_split.ring.desc[desc_idx].flags &
303dfd33aa4STiwei Bie VRING_DESC_F_NEXT) {
304dfd33aa4STiwei Bie desc_idx = vq->vq_split.ring.desc[desc_idx].next;
3056c3169a3SBruce Richardson vq->vq_free_cnt++;
3066c3169a3SBruce Richardson }
3076c3169a3SBruce Richardson
308dfd33aa4STiwei Bie vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
3096c3169a3SBruce Richardson vq->vq_desc_head_idx = idx;
3106c3169a3SBruce Richardson
3116c3169a3SBruce Richardson vq->vq_used_cons_idx++;
3126c3169a3SBruce Richardson vq->vq_free_cnt++;
3136c3169a3SBruce Richardson }
3146c3169a3SBruce Richardson
3156c3169a3SBruce Richardson PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
3166c3169a3SBruce Richardson vq->vq_free_cnt, vq->vq_desc_head_idx);
3176c3169a3SBruce Richardson
318da4f2e4bSZhiyong Yang result = cvq->virtio_net_hdr_mz->addr;
319d5e1ce79STiwei Bie return result;
320d5e1ce79STiwei Bie }
3216c3169a3SBruce Richardson
322d5e1ce79STiwei Bie static int
virtio_send_command(struct virtnet_ctl * cvq,struct virtio_pmd_ctrl * ctrl,int * dlen,int pkt_num)323d5e1ce79STiwei Bie virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
324d5e1ce79STiwei Bie int *dlen, int pkt_num)
325d5e1ce79STiwei Bie {
326d5e1ce79STiwei Bie virtio_net_ctrl_ack status = ~0;
327d5e1ce79STiwei Bie struct virtio_pmd_ctrl *result;
328d5e1ce79STiwei Bie struct virtqueue *vq;
329d5e1ce79STiwei Bie
330d5e1ce79STiwei Bie ctrl->status = status;
331d5e1ce79STiwei Bie
3323169550fSMaxime Coquelin if (!cvq) {
333d5e1ce79STiwei Bie PMD_INIT_LOG(ERR, "Control queue is not supported.");
334d5e1ce79STiwei Bie return -1;
335d5e1ce79STiwei Bie }
336d5e1ce79STiwei Bie
337d5e1ce79STiwei Bie rte_spinlock_lock(&cvq->lock);
3383169550fSMaxime Coquelin vq = virtnet_cq_to_vq(cvq);
339d5e1ce79STiwei Bie
340d5e1ce79STiwei Bie PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
341d5e1ce79STiwei Bie "vq->hw->cvq = %p vq = %p",
342d5e1ce79STiwei Bie vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
343d5e1ce79STiwei Bie
344d5e1ce79STiwei Bie if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
345d5e1ce79STiwei Bie rte_spinlock_unlock(&cvq->lock);
346d5e1ce79STiwei Bie return -1;
347d5e1ce79STiwei Bie }
348d5e1ce79STiwei Bie
349d5e1ce79STiwei Bie memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
350d5e1ce79STiwei Bie sizeof(struct virtio_pmd_ctrl));
351d5e1ce79STiwei Bie
352b4f9a45aSMaxime Coquelin if (virtio_with_packed_queue(vq->hw))
353d5e1ce79STiwei Bie result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
354d5e1ce79STiwei Bie else
355d5e1ce79STiwei Bie result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
356d5e1ce79STiwei Bie
357a2ffb87bSXiao Wang rte_spinlock_unlock(&cvq->lock);
358da4f2e4bSZhiyong Yang return result->status;
3596c3169a3SBruce Richardson }
3606c3169a3SBruce Richardson
3616c3169a3SBruce Richardson static int
virtio_set_multiple_queues_rss(struct rte_eth_dev * dev,uint16_t nb_queues)3620c9d6620SMaxime Coquelin virtio_set_multiple_queues_rss(struct rte_eth_dev *dev, uint16_t nb_queues)
3636c3169a3SBruce Richardson {
3646c3169a3SBruce Richardson struct virtio_hw *hw = dev->data->dev_private;
3656c3169a3SBruce Richardson struct virtio_pmd_ctrl ctrl;
3660c9d6620SMaxime Coquelin struct virtio_net_ctrl_rss rss;
3670c9d6620SMaxime Coquelin int dlen, ret;
3680c9d6620SMaxime Coquelin
3690c9d6620SMaxime Coquelin rss.hash_types = hw->rss_hash_types & VIRTIO_NET_HASH_TYPE_MASK;
3700c9d6620SMaxime Coquelin RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(VIRTIO_NET_RSS_RETA_SIZE));
3710c9d6620SMaxime Coquelin rss.indirection_table_mask = VIRTIO_NET_RSS_RETA_SIZE - 1;
3720c9d6620SMaxime Coquelin rss.unclassified_queue = 0;
3730c9d6620SMaxime Coquelin memcpy(rss.indirection_table, hw->rss_reta, VIRTIO_NET_RSS_RETA_SIZE * sizeof(uint16_t));
3740c9d6620SMaxime Coquelin rss.max_tx_vq = nb_queues;
3750c9d6620SMaxime Coquelin rss.hash_key_length = VIRTIO_NET_RSS_KEY_SIZE;
3760c9d6620SMaxime Coquelin memcpy(rss.hash_key_data, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
3770c9d6620SMaxime Coquelin
3780c9d6620SMaxime Coquelin ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
3790c9d6620SMaxime Coquelin ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_RSS_CONFIG;
3800c9d6620SMaxime Coquelin memcpy(ctrl.data, &rss, sizeof(rss));
3810c9d6620SMaxime Coquelin
3820c9d6620SMaxime Coquelin dlen = sizeof(rss);
3830c9d6620SMaxime Coquelin
3840c9d6620SMaxime Coquelin ret = virtio_send_command(hw->cvq, &ctrl, &dlen, 1);
3850c9d6620SMaxime Coquelin if (ret) {
3860c9d6620SMaxime Coquelin PMD_INIT_LOG(ERR, "RSS multiqueue configured but send command failed");
3870c9d6620SMaxime Coquelin return -EINVAL;
3880c9d6620SMaxime Coquelin }
3890c9d6620SMaxime Coquelin
3900c9d6620SMaxime Coquelin return 0;
3910c9d6620SMaxime Coquelin }
3920c9d6620SMaxime Coquelin
3930c9d6620SMaxime Coquelin static int
virtio_set_multiple_queues_auto(struct rte_eth_dev * dev,uint16_t nb_queues)3940c9d6620SMaxime Coquelin virtio_set_multiple_queues_auto(struct rte_eth_dev *dev, uint16_t nb_queues)
3950c9d6620SMaxime Coquelin {
3960c9d6620SMaxime Coquelin struct virtio_hw *hw = dev->data->dev_private;
3970c9d6620SMaxime Coquelin struct virtio_pmd_ctrl ctrl;
3980c9d6620SMaxime Coquelin int dlen;
3996c3169a3SBruce Richardson int ret;
4006c3169a3SBruce Richardson
4016c3169a3SBruce Richardson ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
4026c3169a3SBruce Richardson ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
4036c3169a3SBruce Richardson memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
4046c3169a3SBruce Richardson
4050c9d6620SMaxime Coquelin dlen = sizeof(uint16_t);
4066c3169a3SBruce Richardson
4070c9d6620SMaxime Coquelin ret = virtio_send_command(hw->cvq, &ctrl, &dlen, 1);
4086c3169a3SBruce Richardson if (ret) {
4096c3169a3SBruce Richardson PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
4106c3169a3SBruce Richardson "failed, this is too late now...");
4116c3169a3SBruce Richardson return -EINVAL;
4126c3169a3SBruce Richardson }
4136c3169a3SBruce Richardson
4146c3169a3SBruce Richardson return 0;
4156c3169a3SBruce Richardson }
4166c3169a3SBruce Richardson
4170c9d6620SMaxime Coquelin static int
virtio_set_multiple_queues(struct rte_eth_dev * dev,uint16_t nb_queues)4180c9d6620SMaxime Coquelin virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
4190c9d6620SMaxime Coquelin {
4200c9d6620SMaxime Coquelin struct virtio_hw *hw = dev->data->dev_private;
4210c9d6620SMaxime Coquelin
4220c9d6620SMaxime Coquelin if (virtio_with_feature(hw, VIRTIO_NET_F_RSS))
4230c9d6620SMaxime Coquelin return virtio_set_multiple_queues_rss(dev, nb_queues);
4240c9d6620SMaxime Coquelin else
4250c9d6620SMaxime Coquelin return virtio_set_multiple_queues_auto(dev, nb_queues);
4260c9d6620SMaxime Coquelin }
4270c9d6620SMaxime Coquelin
42869c80d4eSYuanhan Liu static uint16_t
virtio_get_nr_vq(struct virtio_hw * hw)42969c80d4eSYuanhan Liu virtio_get_nr_vq(struct virtio_hw *hw)
43069c80d4eSYuanhan Liu {
43169c80d4eSYuanhan Liu uint16_t nr_vq = hw->max_queue_pairs * 2;
43269c80d4eSYuanhan Liu
433b4f9a45aSMaxime Coquelin if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
43469c80d4eSYuanhan Liu nr_vq += 1;
43569c80d4eSYuanhan Liu
43669c80d4eSYuanhan Liu return nr_vq;
43769c80d4eSYuanhan Liu }
43869c80d4eSYuanhan Liu
439f4d1ad15SYuanhan Liu static void
virtio_init_vring(struct virtqueue * vq)440f4d1ad15SYuanhan Liu virtio_init_vring(struct virtqueue *vq)
441f4d1ad15SYuanhan Liu {
442f4d1ad15SYuanhan Liu int size = vq->vq_nentries;
443f4d1ad15SYuanhan Liu uint8_t *ring_mem = vq->vq_ring_virt_mem;
444f4d1ad15SYuanhan Liu
445f4d1ad15SYuanhan Liu PMD_INIT_FUNC_TRACE();
446f4d1ad15SYuanhan Liu
447f4d1ad15SYuanhan Liu memset(ring_mem, 0, vq->vq_ring_size);
448f803734bSJens Freimann
449f4d1ad15SYuanhan Liu vq->vq_used_cons_idx = 0;
450f4d1ad15SYuanhan Liu vq->vq_desc_head_idx = 0;
451f4d1ad15SYuanhan Liu vq->vq_avail_idx = 0;
452f4d1ad15SYuanhan Liu vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
453f4d1ad15SYuanhan Liu vq->vq_free_cnt = vq->vq_nentries;
454f4d1ad15SYuanhan Liu memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
455b4f9a45aSMaxime Coquelin if (virtio_with_packed_queue(vq->hw)) {
456dfd33aa4STiwei Bie vring_init_packed(&vq->vq_packed.ring, ring_mem,
457df968842SMaxime Coquelin VIRTIO_VRING_ALIGN, size);
458f803734bSJens Freimann vring_desc_init_packed(vq, size);
459f803734bSJens Freimann } else {
460dfd33aa4STiwei Bie struct vring *vr = &vq->vq_split.ring;
461dfd33aa4STiwei Bie
462df968842SMaxime Coquelin vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
463f803734bSJens Freimann vring_desc_init_split(vr->desc, size);
464f803734bSJens Freimann }
465f4d1ad15SYuanhan Liu /*
466f4d1ad15SYuanhan Liu * Disable device(host) interrupting guest
467f4d1ad15SYuanhan Liu */
468f4d1ad15SYuanhan Liu virtqueue_disable_intr(vq);
469f4d1ad15SYuanhan Liu }
470f4d1ad15SYuanhan Liu
47169c80d4eSYuanhan Liu static int
virtio_init_queue(struct rte_eth_dev * dev,uint16_t queue_idx)472b5ba7ee4SMaxime Coquelin virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
4736c3169a3SBruce Richardson {
4746c3169a3SBruce Richardson char vq_name[VIRTQUEUE_MAX_NAME_SZ];
47501ad44fdSHuawei Xie char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
47601ad44fdSHuawei Xie const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
4771e7bd238SStephen Hemminger unsigned int vq_size, size;
4786c3169a3SBruce Richardson struct virtio_hw *hw = dev->data->dev_private;
479f24f8f9fSJianfeng Tan struct virtnet_rx *rxvq = NULL;
480f24f8f9fSJianfeng Tan struct virtnet_tx *txvq = NULL;
481f24f8f9fSJianfeng Tan struct virtnet_ctl *cvq = NULL;
48201ad44fdSHuawei Xie struct virtqueue *vq;
483905a2469SYuanhan Liu size_t sz_hdr_mz = 0;
48401ad44fdSHuawei Xie void *sw_ring = NULL;
485b5ba7ee4SMaxime Coquelin int queue_type = virtio_get_queue_type(hw, queue_idx);
48601ad44fdSHuawei Xie int ret;
4874a5140abSMaxime Coquelin int numa_node = dev->device->numa_node;
488b59d4d55SMaxime Coquelin struct rte_mbuf *fake_mbuf = NULL;
4896c3169a3SBruce Richardson
4904a5140abSMaxime Coquelin PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
491b5ba7ee4SMaxime Coquelin queue_idx, numa_node);
4926c3169a3SBruce Richardson
4936c3169a3SBruce Richardson /*
4946c3169a3SBruce Richardson * Read the virtqueue size from the Queue Size field
4956c3169a3SBruce Richardson * Always power of 2 and if 0 virtqueue does not exist
4966c3169a3SBruce Richardson */
497b5ba7ee4SMaxime Coquelin vq_size = VIRTIO_OPS(hw)->get_queue_num(hw, queue_idx);
49869c80d4eSYuanhan Liu PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
4996c3169a3SBruce Richardson if (vq_size == 0) {
5000bb159adSHuawei Xie PMD_INIT_LOG(ERR, "virtqueue does not exist");
5016c3169a3SBruce Richardson return -EINVAL;
502d78deadaSStephen Hemminger }
503d78deadaSStephen Hemminger
504b4f9a45aSMaxime Coquelin if (!virtio_with_packed_queue(hw) && !rte_is_power_of_2(vq_size)) {
505df42dde5SMarvin Liu PMD_INIT_LOG(ERR, "split virtqueue size is not power of 2");
5066c3169a3SBruce Richardson return -EINVAL;
507d78deadaSStephen Hemminger }
508d78deadaSStephen Hemminger
50973d017ddSYuanhan Liu snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
510b5ba7ee4SMaxime Coquelin dev->data->port_id, queue_idx);
51101ad44fdSHuawei Xie
512905a2469SYuanhan Liu size = RTE_ALIGN_CEIL(sizeof(*vq) +
5136c3169a3SBruce Richardson vq_size * sizeof(struct vq_desc_extra),
5146c3169a3SBruce Richardson RTE_CACHE_LINE_SIZE);
515905a2469SYuanhan Liu if (queue_type == VTNET_TQ) {
51601ad44fdSHuawei Xie /*
51701ad44fdSHuawei Xie * For each xmit packet, allocate a virtio_net_hdr
51801ad44fdSHuawei Xie * and indirect ring elements
51901ad44fdSHuawei Xie */
52001ad44fdSHuawei Xie sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
52101ad44fdSHuawei Xie } else if (queue_type == VTNET_CQ) {
52201ad44fdSHuawei Xie /* Allocate a page for control vq command, data and status */
523924e6b76SThomas Monjalon sz_hdr_mz = rte_mem_page_size();
52401ad44fdSHuawei Xie }
5254166bbf6SJianfeng Tan
52669c80d4eSYuanhan Liu vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
5274a5140abSMaxime Coquelin numa_node);
52801ad44fdSHuawei Xie if (vq == NULL) {
52901ad44fdSHuawei Xie PMD_INIT_LOG(ERR, "can not allocate vq");
530cab04612SHuawei Xie return -ENOMEM;
531cab04612SHuawei Xie }
532b5ba7ee4SMaxime Coquelin hw->vqs[queue_idx] = vq;
53369c80d4eSYuanhan Liu
5346c3169a3SBruce Richardson vq->hw = hw;
535b5ba7ee4SMaxime Coquelin vq->vq_queue_index = queue_idx;
5366c3169a3SBruce Richardson vq->vq_nentries = vq_size;
537b4f9a45aSMaxime Coquelin if (virtio_with_packed_queue(hw)) {
538dfd33aa4STiwei Bie vq->vq_packed.used_wrap_counter = 1;
53912e9e70cSTiwei Bie vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
540dfd33aa4STiwei Bie vq->vq_packed.event_flags_shadow = 0;
5418e148e49STiwei Bie if (queue_type == VTNET_RQ)
542dfd33aa4STiwei Bie vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
543f803734bSJens Freimann }
5446c3169a3SBruce Richardson
5456c3169a3SBruce Richardson /*
5466c3169a3SBruce Richardson * Reserve a memzone for vring elements
5476c3169a3SBruce Richardson */
548df968842SMaxime Coquelin size = vring_size(hw, vq_size, VIRTIO_VRING_ALIGN);
549df968842SMaxime Coquelin vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_VRING_ALIGN);
55001ad44fdSHuawei Xie PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
55101ad44fdSHuawei Xie size, vq->vq_ring_size);
5526c3169a3SBruce Richardson
55369c80d4eSYuanhan Liu mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
5544a5140abSMaxime Coquelin numa_node, RTE_MEMZONE_IOVA_CONTIG,
555df968842SMaxime Coquelin VIRTIO_VRING_ALIGN);
5566c3169a3SBruce Richardson if (mz == NULL) {
557abf4c84bSBernard Iremonger if (rte_errno == EEXIST)
558abf4c84bSBernard Iremonger mz = rte_memzone_lookup(vq_name);
559abf4c84bSBernard Iremonger if (mz == NULL) {
56001ad44fdSHuawei Xie ret = -ENOMEM;
56176fd789cSMaxime Coquelin goto free_vq;
5626c3169a3SBruce Richardson }
563abf4c84bSBernard Iremonger }
5646c3169a3SBruce Richardson
5655bdd24e4STiwei Bie memset(mz->addr, 0, mz->len);
56601ad44fdSHuawei Xie
567ba55c94aSMaxime Coquelin if (hw->use_va)
568ba55c94aSMaxime Coquelin vq->vq_ring_mem = (uintptr_t)mz->addr;
569ba55c94aSMaxime Coquelin else
570f17ca787SThomas Monjalon vq->vq_ring_mem = mz->iova;
571ba55c94aSMaxime Coquelin
5726c3169a3SBruce Richardson vq->vq_ring_virt_mem = mz->addr;
573ba55c94aSMaxime Coquelin PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem);
574ba55c94aSMaxime Coquelin PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: %p", vq->vq_ring_virt_mem);
5756c3169a3SBruce Richardson
576f4d1ad15SYuanhan Liu virtio_init_vring(vq);
577f4d1ad15SYuanhan Liu
57801ad44fdSHuawei Xie if (sz_hdr_mz) {
57973d017ddSYuanhan Liu snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
580b5ba7ee4SMaxime Coquelin dev->data->port_id, queue_idx);
58101ad44fdSHuawei Xie hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
5824a5140abSMaxime Coquelin numa_node, RTE_MEMZONE_IOVA_CONTIG,
5836dc5de3aSStephen Hemminger RTE_CACHE_LINE_SIZE);
5846dc5de3aSStephen Hemminger if (hdr_mz == NULL) {
585abf4c84bSBernard Iremonger if (rte_errno == EEXIST)
58601ad44fdSHuawei Xie hdr_mz = rte_memzone_lookup(vq_hdr_name);
5876dc5de3aSStephen Hemminger if (hdr_mz == NULL) {
58801ad44fdSHuawei Xie ret = -ENOMEM;
58976fd789cSMaxime Coquelin goto free_mz;
5906c3169a3SBruce Richardson }
591abf4c84bSBernard Iremonger }
59201ad44fdSHuawei Xie }
59301ad44fdSHuawei Xie
59401ad44fdSHuawei Xie if (queue_type == VTNET_RQ) {
59501ad44fdSHuawei Xie size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
59601ad44fdSHuawei Xie sizeof(vq->sw_ring[0]);
59701ad44fdSHuawei Xie
59801ad44fdSHuawei Xie sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
5994a5140abSMaxime Coquelin RTE_CACHE_LINE_SIZE, numa_node);
60001ad44fdSHuawei Xie if (!sw_ring) {
60101ad44fdSHuawei Xie PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
60201ad44fdSHuawei Xie ret = -ENOMEM;
60376fd789cSMaxime Coquelin goto free_hdr_mz;
60401ad44fdSHuawei Xie }
60501ad44fdSHuawei Xie
606b59d4d55SMaxime Coquelin fake_mbuf = rte_zmalloc_socket("sw_ring", sizeof(*fake_mbuf),
607b59d4d55SMaxime Coquelin RTE_CACHE_LINE_SIZE, numa_node);
608b59d4d55SMaxime Coquelin if (!fake_mbuf) {
609b59d4d55SMaxime Coquelin PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
610b59d4d55SMaxime Coquelin ret = -ENOMEM;
611b59d4d55SMaxime Coquelin goto free_sw_ring;
612b59d4d55SMaxime Coquelin }
613b59d4d55SMaxime Coquelin
61401ad44fdSHuawei Xie vq->sw_ring = sw_ring;
615905a2469SYuanhan Liu rxvq = &vq->rxq;
61601ad44fdSHuawei Xie rxvq->port_id = dev->data->port_id;
61701ad44fdSHuawei Xie rxvq->mz = mz;
618b59d4d55SMaxime Coquelin rxvq->fake_mbuf = fake_mbuf;
61901ad44fdSHuawei Xie } else if (queue_type == VTNET_TQ) {
620905a2469SYuanhan Liu txvq = &vq->txq;
62101ad44fdSHuawei Xie txvq->port_id = dev->data->port_id;
62201ad44fdSHuawei Xie txvq->mz = mz;
62301ad44fdSHuawei Xie txvq->virtio_net_hdr_mz = hdr_mz;
624ba55c94aSMaxime Coquelin if (hw->use_va)
625ba55c94aSMaxime Coquelin txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
626ba55c94aSMaxime Coquelin else
627f17ca787SThomas Monjalon txvq->virtio_net_hdr_mem = hdr_mz->iova;
628f24f8f9fSJianfeng Tan } else if (queue_type == VTNET_CQ) {
629905a2469SYuanhan Liu cvq = &vq->cq;
630f24f8f9fSJianfeng Tan cvq->mz = mz;
631f24f8f9fSJianfeng Tan cvq->virtio_net_hdr_mz = hdr_mz;
632ba55c94aSMaxime Coquelin if (hw->use_va)
633ba55c94aSMaxime Coquelin cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
634ba55c94aSMaxime Coquelin else
635f17ca787SThomas Monjalon cvq->virtio_net_hdr_mem = hdr_mz->iova;
636924e6b76SThomas Monjalon memset(cvq->virtio_net_hdr_mz->addr, 0, rte_mem_page_size());
63769c80d4eSYuanhan Liu
63869c80d4eSYuanhan Liu hw->cvq = cvq;
639f24f8f9fSJianfeng Tan }
640f24f8f9fSJianfeng Tan
641ba55c94aSMaxime Coquelin if (hw->use_va)
642ba55c94aSMaxime Coquelin vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
643ba55c94aSMaxime Coquelin else
644ba55c94aSMaxime Coquelin vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
645ba55c94aSMaxime Coquelin
646f24f8f9fSJianfeng Tan if (queue_type == VTNET_TQ) {
647f24f8f9fSJianfeng Tan struct virtio_tx_region *txr;
648f24f8f9fSJianfeng Tan unsigned int i;
649f24f8f9fSJianfeng Tan
6506dc5de3aSStephen Hemminger txr = hdr_mz->addr;
6516dc5de3aSStephen Hemminger memset(txr, 0, vq_size * sizeof(*txr));
6526dc5de3aSStephen Hemminger for (i = 0; i < vq_size; i++) {
6536dc5de3aSStephen Hemminger /* first indirect descriptor is always the tx header */
654b4f9a45aSMaxime Coquelin if (!virtio_with_packed_queue(hw)) {
655381f39ebSMarvin Liu struct vring_desc *start_dp = txr[i].tx_indir;
656892dc798SJens Freimann vring_desc_init_split(start_dp,
657892dc798SJens Freimann RTE_DIM(txr[i].tx_indir));
65801ad44fdSHuawei Xie start_dp->addr = txvq->virtio_net_hdr_mem
6596dc5de3aSStephen Hemminger + i * sizeof(*txr)
660892dc798SJens Freimann + offsetof(struct virtio_tx_region,
661892dc798SJens Freimann tx_hdr);
66201ad44fdSHuawei Xie start_dp->len = hw->vtnet_hdr_size;
6636dc5de3aSStephen Hemminger start_dp->flags = VRING_DESC_F_NEXT;
664381f39ebSMarvin Liu } else {
665381f39ebSMarvin Liu struct vring_packed_desc *start_dp =
666381f39ebSMarvin Liu txr[i].tx_packed_indir;
667381f39ebSMarvin Liu vring_desc_init_indirect_packed(start_dp,
668381f39ebSMarvin Liu RTE_DIM(txr[i].tx_packed_indir));
669381f39ebSMarvin Liu start_dp->addr = txvq->virtio_net_hdr_mem
670381f39ebSMarvin Liu + i * sizeof(*txr)
671381f39ebSMarvin Liu + offsetof(struct virtio_tx_region,
672381f39ebSMarvin Liu tx_hdr);
673381f39ebSMarvin Liu start_dp->len = hw->vtnet_hdr_size;
6746dc5de3aSStephen Hemminger }
6756c3169a3SBruce Richardson }
676892dc798SJens Freimann }
6776c3169a3SBruce Richardson
678f8b60756SMaxime Coquelin if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
679595454c5SJianfeng Tan PMD_INIT_LOG(ERR, "setup_queue failed");
68076fd789cSMaxime Coquelin ret = -EINVAL;
68176fd789cSMaxime Coquelin goto clean_vq;
682595454c5SJianfeng Tan }
683595454c5SJianfeng Tan
6846c3169a3SBruce Richardson return 0;
68501ad44fdSHuawei Xie
68676fd789cSMaxime Coquelin clean_vq:
68776fd789cSMaxime Coquelin hw->cvq = NULL;
688b59d4d55SMaxime Coquelin rte_free(fake_mbuf);
689b59d4d55SMaxime Coquelin free_sw_ring:
69001ad44fdSHuawei Xie rte_free(sw_ring);
69176fd789cSMaxime Coquelin free_hdr_mz:
69201ad44fdSHuawei Xie rte_memzone_free(hdr_mz);
69376fd789cSMaxime Coquelin free_mz:
69401ad44fdSHuawei Xie rte_memzone_free(mz);
69576fd789cSMaxime Coquelin free_vq:
69601ad44fdSHuawei Xie rte_free(vq);
6975d903aeeSGaoxiang Liu hw->vqs[queue_idx] = NULL;
69801ad44fdSHuawei Xie
69901ad44fdSHuawei Xie return ret;
7006c3169a3SBruce Richardson }
7016c3169a3SBruce Richardson
70269c80d4eSYuanhan Liu static void
virtio_free_queues(struct virtio_hw * hw)70369c80d4eSYuanhan Liu virtio_free_queues(struct virtio_hw *hw)
7046c3169a3SBruce Richardson {
70569c80d4eSYuanhan Liu uint16_t nr_vq = virtio_get_nr_vq(hw);
70669c80d4eSYuanhan Liu struct virtqueue *vq;
70769c80d4eSYuanhan Liu int queue_type;
70869c80d4eSYuanhan Liu uint16_t i;
7096c3169a3SBruce Richardson
7100e78cfddSHuanle Han if (hw->vqs == NULL)
7110e78cfddSHuanle Han return;
7120e78cfddSHuanle Han
71369c80d4eSYuanhan Liu for (i = 0; i < nr_vq; i++) {
71469c80d4eSYuanhan Liu vq = hw->vqs[i];
71569c80d4eSYuanhan Liu if (!vq)
71669c80d4eSYuanhan Liu continue;
71769c80d4eSYuanhan Liu
71869c80d4eSYuanhan Liu queue_type = virtio_get_queue_type(hw, i);
71969c80d4eSYuanhan Liu if (queue_type == VTNET_RQ) {
720b59d4d55SMaxime Coquelin rte_free(vq->rxq.fake_mbuf);
72169c80d4eSYuanhan Liu rte_free(vq->sw_ring);
72269c80d4eSYuanhan Liu rte_memzone_free(vq->rxq.mz);
72369c80d4eSYuanhan Liu } else if (queue_type == VTNET_TQ) {
72469c80d4eSYuanhan Liu rte_memzone_free(vq->txq.mz);
72569c80d4eSYuanhan Liu rte_memzone_free(vq->txq.virtio_net_hdr_mz);
72669c80d4eSYuanhan Liu } else {
72769c80d4eSYuanhan Liu rte_memzone_free(vq->cq.mz);
72869c80d4eSYuanhan Liu rte_memzone_free(vq->cq.virtio_net_hdr_mz);
72969c80d4eSYuanhan Liu }
73069c80d4eSYuanhan Liu
73169c80d4eSYuanhan Liu rte_free(vq);
7320e78cfddSHuanle Han hw->vqs[i] = NULL;
73369c80d4eSYuanhan Liu }
73469c80d4eSYuanhan Liu
73569c80d4eSYuanhan Liu rte_free(hw->vqs);
7360e78cfddSHuanle Han hw->vqs = NULL;
73769c80d4eSYuanhan Liu }
73869c80d4eSYuanhan Liu
73969c80d4eSYuanhan Liu static int
virtio_alloc_queues(struct rte_eth_dev * dev)74069c80d4eSYuanhan Liu virtio_alloc_queues(struct rte_eth_dev *dev)
74169c80d4eSYuanhan Liu {
74269c80d4eSYuanhan Liu struct virtio_hw *hw = dev->data->dev_private;
74369c80d4eSYuanhan Liu uint16_t nr_vq = virtio_get_nr_vq(hw);
74469c80d4eSYuanhan Liu uint16_t i;
74569c80d4eSYuanhan Liu int ret;
74669c80d4eSYuanhan Liu
74769c80d4eSYuanhan Liu hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
74869c80d4eSYuanhan Liu if (!hw->vqs) {
74969c80d4eSYuanhan Liu PMD_INIT_LOG(ERR, "failed to allocate vqs");
75069c80d4eSYuanhan Liu return -ENOMEM;
75169c80d4eSYuanhan Liu }
75269c80d4eSYuanhan Liu
75369c80d4eSYuanhan Liu for (i = 0; i < nr_vq; i++) {
75469c80d4eSYuanhan Liu ret = virtio_init_queue(dev, i);
7556c3169a3SBruce Richardson if (ret < 0) {
75669c80d4eSYuanhan Liu virtio_free_queues(hw);
7576c3169a3SBruce Richardson return ret;
7586c3169a3SBruce Richardson }
7596c3169a3SBruce Richardson }
7606c3169a3SBruce Richardson
76169c80d4eSYuanhan Liu return 0;
762941d64b5SBernard Iremonger }
763941d64b5SBernard Iremonger
7649ebdeefeSJianfeng Tan static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
7659ebdeefeSJianfeng Tan
7660c9d6620SMaxime Coquelin static void
virtio_free_rss(struct virtio_hw * hw)7670c9d6620SMaxime Coquelin virtio_free_rss(struct virtio_hw *hw)
7680c9d6620SMaxime Coquelin {
7690c9d6620SMaxime Coquelin rte_free(hw->rss_key);
7700c9d6620SMaxime Coquelin hw->rss_key = NULL;
7710c9d6620SMaxime Coquelin
7720c9d6620SMaxime Coquelin rte_free(hw->rss_reta);
7730c9d6620SMaxime Coquelin hw->rss_reta = NULL;
7740c9d6620SMaxime Coquelin }
7750c9d6620SMaxime Coquelin
/*
 * Close the virtio device: detach interrupts, reset the device and free
 * all queue/RSS resources. Only the primary process performs teardown;
 * the sequence below is order-sensitive (interrupts must be quiesced
 * before the device reset, and the reset must precede freeing rings).
 */
int
virtio_dev_close(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;

	PMD_INIT_LOG(DEBUG, "virtio_dev_close");
	/* Secondary processes do not own the device resources. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Nothing to do if the device was never opened (or already closed). */
	if (!hw->opened)
		return 0;
	hw->opened = 0;

	/* reset the NIC */
	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		VIRTIO_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
	/* Detach per-queue Rx interrupt vectors before disabling the efds. */
	if (intr_conf->rxq)
		virtio_queues_unbind_intr(dev);

	if (intr_conf->lsc || intr_conf->rxq) {
		virtio_intr_disable(dev);
		rte_intr_efd_disable(dev->intr_handle);
		rte_intr_vec_list_free(dev->intr_handle);
	}

	/* Device reset first, then release SW/HW queue resources. */
	virtio_reset(hw);
	virtio_dev_free_mbufs(dev);
	virtio_free_queues(hw);
	virtio_free_rss(hw);

	/* Bus-specific close (PCI / virtio-user) finishes the job. */
	return VIRTIO_OPS(hw)->dev_close(hw);
}
8096c3169a3SBruce Richardson
8109039c812SAndrew Rybchenko static int
virtio_dev_promiscuous_enable(struct rte_eth_dev * dev)8116c3169a3SBruce Richardson virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
8126c3169a3SBruce Richardson {
8136c3169a3SBruce Richardson struct virtio_hw *hw = dev->data->dev_private;
8146c3169a3SBruce Richardson struct virtio_pmd_ctrl ctrl;
8156c3169a3SBruce Richardson int dlen[1];
8166c3169a3SBruce Richardson int ret;
8176c3169a3SBruce Richardson
818b4f9a45aSMaxime Coquelin if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
819f2462150SFerruh Yigit PMD_INIT_LOG(INFO, "host does not support rx control");
8209039c812SAndrew Rybchenko return -ENOTSUP;
821e9e414a4SStephen Hemminger }
822e9e414a4SStephen Hemminger
8236c3169a3SBruce Richardson ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
8246c3169a3SBruce Richardson ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
8256c3169a3SBruce Richardson ctrl.data[0] = 1;
8266c3169a3SBruce Richardson dlen[0] = 1;
8276c3169a3SBruce Richardson
8286c3169a3SBruce Richardson ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
8299039c812SAndrew Rybchenko if (ret) {
8306c3169a3SBruce Richardson PMD_INIT_LOG(ERR, "Failed to enable promisc");
8319039c812SAndrew Rybchenko return -EAGAIN;
8326c3169a3SBruce Richardson }
8336c3169a3SBruce Richardson
8349039c812SAndrew Rybchenko return 0;
8359039c812SAndrew Rybchenko }
8369039c812SAndrew Rybchenko
8379039c812SAndrew Rybchenko static int
virtio_dev_promiscuous_disable(struct rte_eth_dev * dev)8386c3169a3SBruce Richardson virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
8396c3169a3SBruce Richardson {
8406c3169a3SBruce Richardson struct virtio_hw *hw = dev->data->dev_private;
8416c3169a3SBruce Richardson struct virtio_pmd_ctrl ctrl;
8426c3169a3SBruce Richardson int dlen[1];
8436c3169a3SBruce Richardson int ret;
8446c3169a3SBruce Richardson
845b4f9a45aSMaxime Coquelin if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
846f2462150SFerruh Yigit PMD_INIT_LOG(INFO, "host does not support rx control");
8479039c812SAndrew Rybchenko return -ENOTSUP;
848e9e414a4SStephen Hemminger }
849e9e414a4SStephen Hemminger
8506c3169a3SBruce Richardson ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
8516c3169a3SBruce Richardson ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
8526c3169a3SBruce Richardson ctrl.data[0] = 0;
8536c3169a3SBruce Richardson dlen[0] = 1;
8546c3169a3SBruce Richardson
8556c3169a3SBruce Richardson ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
8569039c812SAndrew Rybchenko if (ret) {
8576c3169a3SBruce Richardson PMD_INIT_LOG(ERR, "Failed to disable promisc");
8589039c812SAndrew Rybchenko return -EAGAIN;
8599039c812SAndrew Rybchenko }
8609039c812SAndrew Rybchenko
8619039c812SAndrew Rybchenko return 0;
8626c3169a3SBruce Richardson }
8636c3169a3SBruce Richardson
864ca041cd4SIvan Ilchenko static int
virtio_dev_allmulticast_enable(struct rte_eth_dev * dev)8656c3169a3SBruce Richardson virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
8666c3169a3SBruce Richardson {
8676c3169a3SBruce Richardson struct virtio_hw *hw = dev->data->dev_private;
8686c3169a3SBruce Richardson struct virtio_pmd_ctrl ctrl;
8696c3169a3SBruce Richardson int dlen[1];
8706c3169a3SBruce Richardson int ret;
8716c3169a3SBruce Richardson
872b4f9a45aSMaxime Coquelin if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
873f2462150SFerruh Yigit PMD_INIT_LOG(INFO, "host does not support rx control");
874ca041cd4SIvan Ilchenko return -ENOTSUP;
875e9e414a4SStephen Hemminger }
876e9e414a4SStephen Hemminger
8776c3169a3SBruce Richardson ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
8786c3169a3SBruce Richardson ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
8796c3169a3SBruce Richardson ctrl.data[0] = 1;
8806c3169a3SBruce Richardson dlen[0] = 1;
8816c3169a3SBruce Richardson
8826c3169a3SBruce Richardson ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
883ca041cd4SIvan Ilchenko if (ret) {
8846c3169a3SBruce Richardson PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
885ca041cd4SIvan Ilchenko return -EAGAIN;
8866c3169a3SBruce Richardson }
8876c3169a3SBruce Richardson
888ca041cd4SIvan Ilchenko return 0;
889ca041cd4SIvan Ilchenko }
890ca041cd4SIvan Ilchenko
891ca041cd4SIvan Ilchenko static int
virtio_dev_allmulticast_disable(struct rte_eth_dev * dev)8926c3169a3SBruce Richardson virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
8936c3169a3SBruce Richardson {
8946c3169a3SBruce Richardson struct virtio_hw *hw = dev->data->dev_private;
8956c3169a3SBruce Richardson struct virtio_pmd_ctrl ctrl;
8966c3169a3SBruce Richardson int dlen[1];
8976c3169a3SBruce Richardson int ret;
8986c3169a3SBruce Richardson
899b4f9a45aSMaxime Coquelin if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
900f2462150SFerruh Yigit PMD_INIT_LOG(INFO, "host does not support rx control");
901ca041cd4SIvan Ilchenko return -ENOTSUP;
902e9e414a4SStephen Hemminger }
903e9e414a4SStephen Hemminger
9046c3169a3SBruce Richardson ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
9056c3169a3SBruce Richardson ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
9066c3169a3SBruce Richardson ctrl.data[0] = 0;
9076c3169a3SBruce Richardson dlen[0] = 1;
9086c3169a3SBruce Richardson
9096c3169a3SBruce Richardson ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
910ca041cd4SIvan Ilchenko if (ret) {
9116c3169a3SBruce Richardson PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
912ca041cd4SIvan Ilchenko return -EAGAIN;
913ca041cd4SIvan Ilchenko }
914ca041cd4SIvan Ilchenko
915ca041cd4SIvan Ilchenko return 0;
9166c3169a3SBruce Richardson }
9176c3169a3SBruce Richardson
9184e8169ebSIvan Ilchenko uint16_t
virtio_rx_mem_pool_buf_size(struct rte_mempool * mp)9194e8169ebSIvan Ilchenko virtio_rx_mem_pool_buf_size(struct rte_mempool *mp)
9204e8169ebSIvan Ilchenko {
9214e8169ebSIvan Ilchenko return rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
9224e8169ebSIvan Ilchenko }
9234e8169ebSIvan Ilchenko
/*
 * Check that frames up to @max_rx_pkt_len can be received: either Rx
 * scatter is enabled (a frame may span several mbufs) or a single Rx
 * buffer is large enough to hold the whole frame.
 *
 * Returns true when the configuration is acceptable; otherwise returns
 * false and points *@error at a static description of the problem.
 */
bool
virtio_rx_check_scatter(uint16_t max_rx_pkt_len, uint16_t rx_buf_size,
			bool rx_scatter_enabled, const char **error)
{
	if (rx_scatter_enabled || max_rx_pkt_len <= rx_buf_size)
		return true;

	*error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
	return false;
}
9354e8169ebSIvan Ilchenko
9364e8169ebSIvan Ilchenko static bool
virtio_check_scatter_on_all_rx_queues(struct rte_eth_dev * dev,uint16_t frame_size)9374e8169ebSIvan Ilchenko virtio_check_scatter_on_all_rx_queues(struct rte_eth_dev *dev,
9384e8169ebSIvan Ilchenko uint16_t frame_size)
9394e8169ebSIvan Ilchenko {
9404e8169ebSIvan Ilchenko struct virtio_hw *hw = dev->data->dev_private;
9414e8169ebSIvan Ilchenko struct virtnet_rx *rxvq;
9424e8169ebSIvan Ilchenko struct virtqueue *vq;
9434e8169ebSIvan Ilchenko unsigned int qidx;
9444e8169ebSIvan Ilchenko uint16_t buf_size;
9454e8169ebSIvan Ilchenko const char *error;
9464e8169ebSIvan Ilchenko
9474e8169ebSIvan Ilchenko if (hw->vqs == NULL)
9484e8169ebSIvan Ilchenko return true;
9494e8169ebSIvan Ilchenko
95084cc857bSZhihong Peng for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
95184cc857bSZhihong Peng vq = hw->vqs[2 * qidx + VTNET_SQ_RQ_QUEUE_IDX];
95284cc857bSZhihong Peng if (vq == NULL)
95384cc857bSZhihong Peng continue;
95484cc857bSZhihong Peng
9554e8169ebSIvan Ilchenko rxvq = &vq->rxq;
9564e8169ebSIvan Ilchenko if (rxvq->mpool == NULL)
9574e8169ebSIvan Ilchenko continue;
9584e8169ebSIvan Ilchenko buf_size = virtio_rx_mem_pool_buf_size(rxvq->mpool);
9594e8169ebSIvan Ilchenko
9604e8169ebSIvan Ilchenko if (!virtio_rx_check_scatter(frame_size, buf_size,
9614e8169ebSIvan Ilchenko hw->rx_ol_scatter, &error)) {
9624e8169ebSIvan Ilchenko PMD_INIT_LOG(ERR, "MTU check for RxQ %u failed: %s",
9634e8169ebSIvan Ilchenko qidx, error);
9644e8169ebSIvan Ilchenko return false;
9654e8169ebSIvan Ilchenko }
9664e8169ebSIvan Ilchenko }
9674e8169ebSIvan Ilchenko
9684e8169ebSIvan Ilchenko return true;
9694e8169ebSIvan Ilchenko }
9704e8169ebSIvan Ilchenko
9714ec2424aSSouvik Dey #define VLAN_TAG_LEN 4 /* 802.3ac tag (not DMA'd) */
9724ec2424aSSouvik Dey static int
virtio_mtu_set(struct rte_eth_dev * dev,uint16_t mtu)9734ec2424aSSouvik Dey virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
9744ec2424aSSouvik Dey {
9754ec2424aSSouvik Dey struct virtio_hw *hw = dev->data->dev_private;
97635b2d13fSOlivier Matz uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
9774ec2424aSSouvik Dey hw->vtnet_hdr_size;
9784ec2424aSSouvik Dey uint32_t frame_size = mtu + ether_hdr_len;
97949d26d9eSMaxime Coquelin uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
9804ec2424aSSouvik Dey
98149d26d9eSMaxime Coquelin max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
98249d26d9eSMaxime Coquelin
98335b2d13fSOlivier Matz if (mtu < RTE_ETHER_MIN_MTU || frame_size > max_frame_size) {
984f2462150SFerruh Yigit PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
98535b2d13fSOlivier Matz RTE_ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
9864ec2424aSSouvik Dey return -EINVAL;
9874ec2424aSSouvik Dey }
9884e8169ebSIvan Ilchenko
9894e8169ebSIvan Ilchenko if (!virtio_check_scatter_on_all_rx_queues(dev, frame_size)) {
9904e8169ebSIvan Ilchenko PMD_INIT_LOG(ERR, "MTU vs Rx scatter and Rx buffers check failed");
9914e8169ebSIvan Ilchenko return -EINVAL;
9924e8169ebSIvan Ilchenko }
9934e8169ebSIvan Ilchenko
9944e8169ebSIvan Ilchenko hw->max_rx_pkt_len = frame_size;
9954e8169ebSIvan Ilchenko
9964ec2424aSSouvik Dey return 0;
9974ec2424aSSouvik Dey }
9984ec2424aSSouvik Dey
999c056be23SJianfeng Tan static int
virtio_dev_rx_queue_intr_enable(struct rte_eth_dev * dev,uint16_t queue_id)1000c056be23SJianfeng Tan virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1001c056be23SJianfeng Tan {
10028f66bc4aSTiwei Bie struct virtio_hw *hw = dev->data->dev_private;
1003c056be23SJianfeng Tan struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
10043169550fSMaxime Coquelin struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1005c056be23SJianfeng Tan
1006c056be23SJianfeng Tan virtqueue_enable_intr(vq);
10078f66bc4aSTiwei Bie virtio_mb(hw->weak_barriers);
1008c056be23SJianfeng Tan return 0;
1009c056be23SJianfeng Tan }
1010c056be23SJianfeng Tan
1011c056be23SJianfeng Tan static int
virtio_dev_rx_queue_intr_disable(struct rte_eth_dev * dev,uint16_t queue_id)1012c056be23SJianfeng Tan virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1013c056be23SJianfeng Tan {
1014c056be23SJianfeng Tan struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
10153169550fSMaxime Coquelin struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1016c056be23SJianfeng Tan
1017c056be23SJianfeng Tan virtqueue_disable_intr(vq);
1018c056be23SJianfeng Tan return 0;
1019c056be23SJianfeng Tan }
1020c056be23SJianfeng Tan
/*
 * dev_ops for virtio, bare necessities for basic operation
 */
static const struct eth_dev_ops virtio_eth_dev_ops = {
	/* Device lifecycle */
	.dev_configure = virtio_dev_configure,
	.dev_start = virtio_dev_start,
	.dev_stop = virtio_dev_stop,
	.dev_close = virtio_dev_close,
	/* Rx filtering modes */
	.promiscuous_enable = virtio_dev_promiscuous_enable,
	.promiscuous_disable = virtio_dev_promiscuous_disable,
	.allmulticast_enable = virtio_dev_allmulticast_enable,
	.allmulticast_disable = virtio_dev_allmulticast_disable,
	.mtu_set = virtio_mtu_set,
	.dev_infos_get = virtio_dev_info_get,
	/* Statistics (xstats_reset shares the basic-stats reset handler) */
	.stats_get = virtio_dev_stats_get,
	.xstats_get = virtio_dev_xstats_get,
	.xstats_get_names = virtio_dev_xstats_get_names,
	.stats_reset = virtio_dev_stats_reset,
	.xstats_reset = virtio_dev_stats_reset,
	.link_update = virtio_dev_link_update,
	.vlan_offload_set = virtio_dev_vlan_offload_set,
	/* Queue setup and per-queue Rx interrupt control */
	.rx_queue_setup = virtio_dev_rx_queue_setup,
	.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
	.tx_queue_setup = virtio_dev_tx_queue_setup,
	/* RSS configuration */
	.rss_hash_update = virtio_dev_rss_hash_update,
	.rss_hash_conf_get = virtio_dev_rss_hash_conf_get,
	.reta_update = virtio_dev_rss_reta_update,
	.reta_query = virtio_dev_rss_reta_query,
	/* collect stats per queue */
	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
	.vlan_filter_set = virtio_vlan_filter_set,
	.mac_addr_add = virtio_mac_addr_add,
	.mac_addr_remove = virtio_mac_addr_remove,
	.mac_addr_set = virtio_mac_addr_set,
	.get_monitor_addr = virtio_get_monitor_addr,
};
10586c3169a3SBruce Richardson
/*
 * dev_ops for virtio-user in secondary processes, as we just have
 * some limited supports currently.
 */
const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
	/* Read-only device info and statistics only; no lifecycle or
	 * configuration callbacks are exposed to secondary processes.
	 */
	.dev_infos_get = virtio_dev_info_get,
	.stats_get = virtio_dev_stats_get,
	.xstats_get = virtio_dev_xstats_get,
	.xstats_get_names = virtio_dev_xstats_get_names,
	.stats_reset = virtio_dev_stats_reset,
	.xstats_reset = virtio_dev_stats_reset,
	/* collect stats per queue */
	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
};
10731c8489daSTiwei Bie
10746c3169a3SBruce Richardson static void
virtio_update_stats(struct rte_eth_dev * dev,struct rte_eth_stats * stats)107576d4c652SHarry van Haaren virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
10766c3169a3SBruce Richardson {
10776c3169a3SBruce Richardson unsigned i;
10786c3169a3SBruce Richardson
10796c3169a3SBruce Richardson for (i = 0; i < dev->data->nb_tx_queues; i++) {
108001ad44fdSHuawei Xie const struct virtnet_tx *txvq = dev->data->tx_queues[i];
10816c3169a3SBruce Richardson if (txvq == NULL)
10826c3169a3SBruce Richardson continue;
10836c3169a3SBruce Richardson
108401ad44fdSHuawei Xie stats->opackets += txvq->stats.packets;
108501ad44fdSHuawei Xie stats->obytes += txvq->stats.bytes;
10866c3169a3SBruce Richardson
10876c3169a3SBruce Richardson if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
108801ad44fdSHuawei Xie stats->q_opackets[i] = txvq->stats.packets;
108901ad44fdSHuawei Xie stats->q_obytes[i] = txvq->stats.bytes;
10906c3169a3SBruce Richardson }
10916c3169a3SBruce Richardson }
10926c3169a3SBruce Richardson
10936c3169a3SBruce Richardson for (i = 0; i < dev->data->nb_rx_queues; i++) {
109401ad44fdSHuawei Xie const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
10956c3169a3SBruce Richardson if (rxvq == NULL)
10966c3169a3SBruce Richardson continue;
10976c3169a3SBruce Richardson
109801ad44fdSHuawei Xie stats->ipackets += rxvq->stats.packets;
109901ad44fdSHuawei Xie stats->ibytes += rxvq->stats.bytes;
110001ad44fdSHuawei Xie stats->ierrors += rxvq->stats.errors;
11016c3169a3SBruce Richardson
11026c3169a3SBruce Richardson if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
110301ad44fdSHuawei Xie stats->q_ipackets[i] = rxvq->stats.packets;
110401ad44fdSHuawei Xie stats->q_ibytes[i] = rxvq->stats.bytes;
11056c3169a3SBruce Richardson }
11066c3169a3SBruce Richardson }
11076c3169a3SBruce Richardson
11086c3169a3SBruce Richardson stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
11096c3169a3SBruce Richardson }
11106c3169a3SBruce Richardson
virtio_dev_xstats_get_names(struct rte_eth_dev * dev,struct rte_eth_xstat_name * xstats_names,__rte_unused unsigned limit)1111baf91c39SRemy Horton static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
1112baf91c39SRemy Horton struct rte_eth_xstat_name *xstats_names,
1113baf91c39SRemy Horton __rte_unused unsigned limit)
1114baf91c39SRemy Horton {
1115baf91c39SRemy Horton unsigned i;
1116baf91c39SRemy Horton unsigned count = 0;
1117baf91c39SRemy Horton unsigned t;
1118baf91c39SRemy Horton
111901ad44fdSHuawei Xie unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
112001ad44fdSHuawei Xie dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
1121baf91c39SRemy Horton
11227e1eb993SYuanhan Liu if (xstats_names != NULL) {
1123baf91c39SRemy Horton /* Note: limit checked in rte_eth_xstats_names() */
1124baf91c39SRemy Horton
1125baf91c39SRemy Horton for (i = 0; i < dev->data->nb_rx_queues; i++) {
112643ec842cSDidier Pallard struct virtnet_rx *rxvq = dev->data->rx_queues[i];
1127baf91c39SRemy Horton if (rxvq == NULL)
1128baf91c39SRemy Horton continue;
112901ad44fdSHuawei Xie for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
1130baf91c39SRemy Horton snprintf(xstats_names[count].name,
1131baf91c39SRemy Horton sizeof(xstats_names[count].name),
1132baf91c39SRemy Horton "rx_q%u_%s", i,
113301ad44fdSHuawei Xie rte_virtio_rxq_stat_strings[t].name);
1134baf91c39SRemy Horton count++;
1135baf91c39SRemy Horton }
1136baf91c39SRemy Horton }
1137baf91c39SRemy Horton
1138baf91c39SRemy Horton for (i = 0; i < dev->data->nb_tx_queues; i++) {
113943ec842cSDidier Pallard struct virtnet_tx *txvq = dev->data->tx_queues[i];
1140baf91c39SRemy Horton if (txvq == NULL)
1141baf91c39SRemy Horton continue;
114201ad44fdSHuawei Xie for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
1143baf91c39SRemy Horton snprintf(xstats_names[count].name,
1144baf91c39SRemy Horton sizeof(xstats_names[count].name),
1145baf91c39SRemy Horton "tx_q%u_%s", i,
114601ad44fdSHuawei Xie rte_virtio_txq_stat_strings[t].name);
1147baf91c39SRemy Horton count++;
1148baf91c39SRemy Horton }
1149baf91c39SRemy Horton }
1150baf91c39SRemy Horton return count;
1151baf91c39SRemy Horton }
1152baf91c39SRemy Horton return nstats;
1153baf91c39SRemy Horton }
1154baf91c39SRemy Horton
/*
 * ethdev xstats_get callback: retrieve extended statistics for every
 * configured Rx and Tx queue.
 *
 * @xstats: caller-provided output array
 * @n: capacity of @xstats
 *
 * Per the ethdev xstats contract, if @n is smaller than the total
 * number of stats the required count is returned (nothing written);
 * otherwise returns the number of entries filled in.
 */
static int
virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		      unsigned n)
{
	unsigned i;
	unsigned count = 0;

	/* Fixed number of xstats per queue, for each direction. */
	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtnet_rx *rxvq = dev->data->rx_queues[i];

		/* Skip queues that were never set up. */
		if (rxvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
			/* Each stat descriptor stores the byte offset of a
			 * uint64_t counter inside struct virtnet_rx.
			 */
			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
				rte_virtio_rxq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtnet_tx *txvq = dev->data->tx_queues[i];

		if (txvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
			xstats[count].value = *(uint64_t *)(((char *)txvq) +
				rte_virtio_txq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	return count;
}
120276d4c652SHarry van Haaren
/* ethdev stats_get callback: aggregate per-queue counters into @stats. */
static int
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	virtio_update_stats(dev, stats);
	return 0;
}
121076d4c652SHarry van Haaren
12119970a9adSIgor Romanov static int
virtio_dev_stats_reset(struct rte_eth_dev * dev)12126c3169a3SBruce Richardson virtio_dev_stats_reset(struct rte_eth_dev *dev)
12136c3169a3SBruce Richardson {
12146c3169a3SBruce Richardson unsigned int i;
12156c3169a3SBruce Richardson
12166c3169a3SBruce Richardson for (i = 0; i < dev->data->nb_tx_queues; i++) {
121701ad44fdSHuawei Xie struct virtnet_tx *txvq = dev->data->tx_queues[i];
12186c3169a3SBruce Richardson if (txvq == NULL)
12196c3169a3SBruce Richardson continue;
12206c3169a3SBruce Richardson
122101ad44fdSHuawei Xie txvq->stats.packets = 0;
122201ad44fdSHuawei Xie txvq->stats.bytes = 0;
122301ad44fdSHuawei Xie txvq->stats.multicast = 0;
122401ad44fdSHuawei Xie txvq->stats.broadcast = 0;
122501ad44fdSHuawei Xie memset(txvq->stats.size_bins, 0,
122601ad44fdSHuawei Xie sizeof(txvq->stats.size_bins[0]) * 8);
12276c3169a3SBruce Richardson }
12286c3169a3SBruce Richardson
12296c3169a3SBruce Richardson for (i = 0; i < dev->data->nb_rx_queues; i++) {
123001ad44fdSHuawei Xie struct virtnet_rx *rxvq = dev->data->rx_queues[i];
12316c3169a3SBruce Richardson if (rxvq == NULL)
12326c3169a3SBruce Richardson continue;
12336c3169a3SBruce Richardson
123401ad44fdSHuawei Xie rxvq->stats.packets = 0;
123501ad44fdSHuawei Xie rxvq->stats.bytes = 0;
123601ad44fdSHuawei Xie rxvq->stats.errors = 0;
123701ad44fdSHuawei Xie rxvq->stats.multicast = 0;
123801ad44fdSHuawei Xie rxvq->stats.broadcast = 0;
123901ad44fdSHuawei Xie memset(rxvq->stats.size_bins, 0,
124001ad44fdSHuawei Xie sizeof(rxvq->stats.size_bins[0]) * 8);
12416c3169a3SBruce Richardson }
12429970a9adSIgor Romanov
12439970a9adSIgor Romanov return 0;
12446c3169a3SBruce Richardson }
12456c3169a3SBruce Richardson
/* Write the MAC cached in hw->mac_addr into the device's virtio-net
 * config space (the "mac" field).
 */
static void
virtio_set_hwaddr(struct virtio_hw *hw)
{
	virtio_write_dev_config(hw,
		offsetof(struct virtio_net_config, mac),
		&hw->mac_addr, RTE_ETHER_ADDR_LEN);
}
12536c3169a3SBruce Richardson
12546c3169a3SBruce Richardson static void
virtio_get_hwaddr(struct virtio_hw * hw)12556c3169a3SBruce Richardson virtio_get_hwaddr(struct virtio_hw *hw)
12566c3169a3SBruce Richardson {
1257b4f9a45aSMaxime Coquelin if (virtio_with_feature(hw, VIRTIO_NET_F_MAC)) {
12589328e105SMaxime Coquelin virtio_read_dev_config(hw,
12596c3169a3SBruce Richardson offsetof(struct virtio_net_config, mac),
126035b2d13fSOlivier Matz &hw->mac_addr, RTE_ETHER_ADDR_LEN);
12616c3169a3SBruce Richardson } else {
1262538da7a1SOlivier Matz rte_eth_random_addr(&hw->mac_addr[0]);
12636c3169a3SBruce Richardson virtio_set_hwaddr(hw);
12646c3169a3SBruce Richardson }
12656c3169a3SBruce Richardson }
12666c3169a3SBruce Richardson
/*
 * Program the device MAC filter table through the control queue.
 *
 * @uc: unicast address table
 * @mc: multicast address table
 *
 * Requires VIRTIO_NET_F_CTRL_MAC_ADDR. Returns -1 when the host lacks
 * the feature, otherwise the control-queue command status (0 on
 * success).
 */
static int
virtio_mac_table_set(struct virtio_hw *hw,
		const struct virtio_net_ctrl_mac *uc,
		const struct virtio_net_ctrl_mac *mc)
{
	struct virtio_pmd_ctrl ctrl;
	int err, len[2];

	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		PMD_DRV_LOG(INFO, "host does not support mac table");
		return -1;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;

	/* Each table is one scatter element: the entry count followed
	 * by the packed MAC addresses (virtio-net ctrl wire format).
	 */
	len[0] = uc->entries * RTE_ETHER_ADDR_LEN + sizeof(uc->entries);
	memcpy(ctrl.data, uc, len[0]);

	len[1] = mc->entries * RTE_ETHER_ADDR_LEN + sizeof(mc->entries);
	memcpy(ctrl.data + len[0], mc, len[1]);

	err = virtio_send_command(hw->cvq, &ctrl, len, 2);
	if (err != 0)
		PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
	return err;
}
12946c3169a3SBruce Richardson
/*
 * ethdev mac_addr_add callback: insert @mac_addr at slot @index and
 * reprogram the device MAC table.
 *
 * The table is rebuilt from dev->data->mac_addrs with the new address
 * substituted at @index, split into unicast and multicast lists as
 * VIRTIO_NET_CTRL_MAC_TABLE_SET requires.
 *
 * Returns 0 on success, -EINVAL for an out-of-range index, or the
 * control-queue error code.
 */
static int
virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t vmdq __rte_unused)
{
	struct virtio_hw *hw = dev->data->dev_private;
	const struct rte_ether_addr *addrs = dev->data->mac_addrs;
	unsigned int i;
	struct virtio_net_ctrl_mac *uc, *mc;

	if (index >= VIRTIO_MAX_MAC_ADDRS) {
		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
		return -EINVAL;
	}

	/* Bounded stack allocations (VIRTIO_MAX_MAC_ADDRS entries each):
	 * entry count followed by the MAC array.
	 */
	uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
		sizeof(uc->entries));
	uc->entries = 0;
	mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
		sizeof(mc->entries));
	mc->entries = 0;

	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
		/* Take the new address for the target slot, the existing
		 * one everywhere else.
		 */
		const struct rte_ether_addr *addr
			= (i == index) ? mac_addr : addrs + i;
		struct virtio_net_ctrl_mac *tbl
			= rte_is_multicast_ether_addr(addr) ? mc : uc;

		memcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN);
	}

	return virtio_mac_table_set(hw, uc, mc);
}
13276c3169a3SBruce Richardson
13286c3169a3SBruce Richardson static void
virtio_mac_addr_remove(struct rte_eth_dev * dev,uint32_t index)13296c3169a3SBruce Richardson virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
13306c3169a3SBruce Richardson {
13316c3169a3SBruce Richardson struct virtio_hw *hw = dev->data->dev_private;
13326d13ea8eSOlivier Matz struct rte_ether_addr *addrs = dev->data->mac_addrs;
13336c3169a3SBruce Richardson struct virtio_net_ctrl_mac *uc, *mc;
13346c3169a3SBruce Richardson unsigned int i;
13356c3169a3SBruce Richardson
13366c3169a3SBruce Richardson if (index >= VIRTIO_MAX_MAC_ADDRS) {
13376c3169a3SBruce Richardson PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
13386c3169a3SBruce Richardson return;
13396c3169a3SBruce Richardson }
13406c3169a3SBruce Richardson
134135b2d13fSOlivier Matz uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
134235b2d13fSOlivier Matz sizeof(uc->entries));
13436c3169a3SBruce Richardson uc->entries = 0;
134435b2d13fSOlivier Matz mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
134535b2d13fSOlivier Matz sizeof(mc->entries));
13466c3169a3SBruce Richardson mc->entries = 0;
13476c3169a3SBruce Richardson
13486c3169a3SBruce Richardson for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
13496c3169a3SBruce Richardson struct virtio_net_ctrl_mac *tbl;
13506c3169a3SBruce Richardson
1351538da7a1SOlivier Matz if (i == index || rte_is_zero_ether_addr(addrs + i))
13526c3169a3SBruce Richardson continue;
13536c3169a3SBruce Richardson
1354538da7a1SOlivier Matz tbl = rte_is_multicast_ether_addr(addrs + i) ? mc : uc;
135535b2d13fSOlivier Matz memcpy(&tbl->macs[tbl->entries++], addrs + i,
135635b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN);
13576c3169a3SBruce Richardson }
13586c3169a3SBruce Richardson
13596c3169a3SBruce Richardson virtio_mac_table_set(hw, uc, mc);
13606c3169a3SBruce Richardson }
13616c3169a3SBruce Richardson
/*
 * ethdev mac_addr_set callback: change the device's primary MAC.
 *
 * Prefers the atomic VIRTIO_NET_CTRL_MAC_ADDR_SET control command when
 * the host supports it; otherwise falls back to writing the config
 * space directly, which is only valid with VIRTIO_NET_F_MAC.
 *
 * Returns 0 on success, -ENOTSUP when neither method is available, or
 * the control-queue command status.
 */
static int
virtio_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct virtio_hw *hw = dev->data->dev_private;

	/* Keep the driver-side cache in sync regardless of method. */
	memcpy(hw->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);

	/* Use atomic update if available */
	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct virtio_pmd_ctrl ctrl;
		int len = RTE_ETHER_ADDR_LEN;

		ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
		ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;

		memcpy(ctrl.data, mac_addr, RTE_ETHER_ADDR_LEN);
		return virtio_send_command(hw->cvq, &ctrl, &len, 1);
	}

	if (!virtio_with_feature(hw, VIRTIO_NET_F_MAC))
		return -ENOTSUP;

	virtio_set_hwaddr(hw);
	return 0;
}
13876c3169a3SBruce Richardson
138864ac7e08SMiao Li #define CLB_VAL_IDX 0
138964ac7e08SMiao Li #define CLB_MSK_IDX 1
139064ac7e08SMiao Li #define CLB_MATCH_IDX 2
139164ac7e08SMiao Li static int
virtio_monitor_callback(const uint64_t value,const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])139264ac7e08SMiao Li virtio_monitor_callback(const uint64_t value,
139364ac7e08SMiao Li const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
139464ac7e08SMiao Li {
139564ac7e08SMiao Li const uint64_t m = opaque[CLB_MSK_IDX];
139664ac7e08SMiao Li const uint64_t v = opaque[CLB_VAL_IDX];
139764ac7e08SMiao Li const uint64_t c = opaque[CLB_MATCH_IDX];
139864ac7e08SMiao Li
139964ac7e08SMiao Li if (c)
140064ac7e08SMiao Li return (value & m) == v ? -1 : 0;
140164ac7e08SMiao Li else
140264ac7e08SMiao Li return (value & m) == v ? 0 : -1;
140364ac7e08SMiao Li }
140464ac7e08SMiao Li
/*
 * ethdev get_monitor_addr callback: describe the memory word the power
 * management library should monitor while waiting for Rx work.
 *
 * Packed ring: watch the flags of the next descriptor to be consumed;
 * the AVAIL/USED bits equalling the pattern for the current wrap
 * counter means the device used it (match mode -> abort wait).
 * Split ring: watch the used ring index; while it still equals the
 * driver's consumed index (masked to ring size) there is nothing new
 * (no-match mode -> keep waiting).
 *
 * Returns 0 on success, -EINVAL if the queue has no virtqueue.
 */
static int
virtio_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw;

	if (vq == NULL)
		return -EINVAL;

	hw = vq->hw;
	if (virtio_with_packed_queue(hw)) {
		struct vring_packed_desc *desc;
		desc = vq->vq_packed.ring.desc;
		pmc->addr = &desc[vq->vq_used_cons_idx].flags;
		/* Expected AVAIL/USED pattern depends on the current
		 * wrap counter: both set on the first pass, both clear
		 * on the next.
		 */
		if (vq->vq_packed.used_wrap_counter)
			pmc->opaque[CLB_VAL_IDX] =
						VRING_PACKED_DESC_F_AVAIL_USED;
		else
			pmc->opaque[CLB_VAL_IDX] = 0;
		pmc->opaque[CLB_MSK_IDX] = VRING_PACKED_DESC_F_AVAIL_USED;
		pmc->opaque[CLB_MATCH_IDX] = 1;
		pmc->size = sizeof(desc[vq->vq_used_cons_idx].flags);
	} else {
		pmc->addr = &vq->vq_split.ring.used->idx;
		pmc->opaque[CLB_VAL_IDX] = vq->vq_used_cons_idx
					& (vq->vq_nentries - 1);
		pmc->opaque[CLB_MSK_IDX] = vq->vq_nentries - 1;
		pmc->opaque[CLB_MATCH_IDX] = 0;
		pmc->size = sizeof(vq->vq_split.ring.used->idx);
	}
	pmc->fn = virtio_monitor_callback;

	return 0;
}
144064ac7e08SMiao Li
14416c3169a3SBruce Richardson static int
virtio_vlan_filter_set(struct rte_eth_dev * dev,uint16_t vlan_id,int on)14426c3169a3SBruce Richardson virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
14436c3169a3SBruce Richardson {
14446c3169a3SBruce Richardson struct virtio_hw *hw = dev->data->dev_private;
14456c3169a3SBruce Richardson struct virtio_pmd_ctrl ctrl;
14466c3169a3SBruce Richardson int len;
14476c3169a3SBruce Richardson
1448b4f9a45aSMaxime Coquelin if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
14496c3169a3SBruce Richardson return -ENOTSUP;
14506c3169a3SBruce Richardson
14516c3169a3SBruce Richardson ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
14526c3169a3SBruce Richardson ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
14536c3169a3SBruce Richardson memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
14546c3169a3SBruce Richardson len = sizeof(vlan_id);
14556c3169a3SBruce Richardson
14566c3169a3SBruce Richardson return virtio_send_command(hw->cvq, &ctrl, &len, 1);
14576c3169a3SBruce Richardson }
14586c3169a3SBruce Richardson
14596ba1f63bSYuanhan Liu static int
virtio_intr_unmask(struct rte_eth_dev * dev)14606bee9d5fSNithin Dabilpuram virtio_intr_unmask(struct rte_eth_dev *dev)
14616bee9d5fSNithin Dabilpuram {
14626bee9d5fSNithin Dabilpuram struct virtio_hw *hw = dev->data->dev_private;
14636bee9d5fSNithin Dabilpuram
14646bee9d5fSNithin Dabilpuram if (rte_intr_ack(dev->intr_handle) < 0)
14656bee9d5fSNithin Dabilpuram return -1;
14666bee9d5fSNithin Dabilpuram
1467f8b60756SMaxime Coquelin if (VIRTIO_OPS(hw)->intr_detect)
1468f8b60756SMaxime Coquelin VIRTIO_OPS(hw)->intr_detect(hw);
14696bee9d5fSNithin Dabilpuram
14706bee9d5fSNithin Dabilpuram return 0;
14716bee9d5fSNithin Dabilpuram }
14726bee9d5fSNithin Dabilpuram
14736bee9d5fSNithin Dabilpuram static int
virtio_intr_enable(struct rte_eth_dev * dev)1474fe19d49cSZhiyong Yang virtio_intr_enable(struct rte_eth_dev *dev)
1475fe19d49cSZhiyong Yang {
1476fe19d49cSZhiyong Yang struct virtio_hw *hw = dev->data->dev_private;
1477fe19d49cSZhiyong Yang
1478fe19d49cSZhiyong Yang if (rte_intr_enable(dev->intr_handle) < 0)
1479fe19d49cSZhiyong Yang return -1;
1480fe19d49cSZhiyong Yang
1481f8b60756SMaxime Coquelin if (VIRTIO_OPS(hw)->intr_detect)
1482f8b60756SMaxime Coquelin VIRTIO_OPS(hw)->intr_detect(hw);
1483fe19d49cSZhiyong Yang
1484fe19d49cSZhiyong Yang return 0;
1485fe19d49cSZhiyong Yang }
1486fe19d49cSZhiyong Yang
1487fe19d49cSZhiyong Yang static int
virtio_intr_disable(struct rte_eth_dev * dev)1488fe19d49cSZhiyong Yang virtio_intr_disable(struct rte_eth_dev *dev)
1489fe19d49cSZhiyong Yang {
1490fe19d49cSZhiyong Yang struct virtio_hw *hw = dev->data->dev_private;
1491fe19d49cSZhiyong Yang
1492fe19d49cSZhiyong Yang if (rte_intr_disable(dev->intr_handle) < 0)
1493fe19d49cSZhiyong Yang return -1;
1494fe19d49cSZhiyong Yang
1495f8b60756SMaxime Coquelin if (VIRTIO_OPS(hw)->intr_detect)
1496f8b60756SMaxime Coquelin VIRTIO_OPS(hw)->intr_detect(hw);
1497fe19d49cSZhiyong Yang
1498fe19d49cSZhiyong Yang return 0;
1499fe19d49cSZhiyong Yang }
1500fe19d49cSZhiyong Yang
/*
 * Negotiate the feature set with the device.
 *
 * @req_features: features the driver is willing to use.
 *
 * Reads the host feature bits, drops VIRTIO_NET_F_MTU when the device
 * advertises an invalid MTU, intersects host and requested features,
 * then (for VIRTIO 1.0+ devices) confirms the result via the
 * FEATURES_OK status handshake. The order of these steps follows the
 * virtio spec's initialization sequence and must not be changed.
 *
 * Returns 0 on success, -1 if the transport rejects the feature set or
 * the device refuses FEATURES_OK.
 */
static int
virtio_ethdev_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
{
	uint64_t host_features;

	/* Prepare guest_features: feature that driver wants to support */
	PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
		req_features);

	/* Read device(host) feature bits */
	host_features = VIRTIO_OPS(hw)->get_features(hw);
	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
		host_features);

	/* If supported, ensure MTU value is valid before acknowledging it. */
	if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
		struct virtio_net_config config;

		virtio_read_dev_config(hw,
			offsetof(struct virtio_net_config, mtu),
			&config.mtu, sizeof(config.mtu));

		if (config.mtu < RTE_ETHER_MIN_MTU)
			req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
	}

	/*
	 * Negotiate features: Subset of device feature bits are written back
	 * guest feature bits.
	 */
	hw->guest_features = req_features;
	hw->guest_features = virtio_negotiate_features(hw, host_features);
	PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
		hw->guest_features);

	/* Give the transport a chance to veto the negotiated set. */
	if (VIRTIO_OPS(hw)->features_ok(hw) < 0)
		return -1;

	if (virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
		virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);

		/* Re-read status: the device may have refused the set. */
		if (!(virtio_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
			PMD_INIT_LOG(ERR, "Failed to set FEATURES_OK status!");
			return -1;
		}
	}

	hw->req_guest_features = req_features;

	return 0;
}
15526c3169a3SBruce Richardson
/*
 * Pause the device so the control path can inject packets on its
 * queues (e.g. gratuitous RARP after live migration).
 *
 * NOTE: on success (return 0) this function returns with
 * hw->state_lock HELD and hw->started cleared; the caller must later
 * call virtio_dev_resume() to restore both. Returns -1 (lock already
 * released) if the device was stopped in the meantime.
 */
int
virtio_dev_pause(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	rte_spinlock_lock(&hw->state_lock);

	if (hw->started == 0) {
		/* Device is just stopped. */
		rte_spinlock_unlock(&hw->state_lock);
		return -1;
	}
	hw->started = 0;
	/*
	 * Prevent the worker threads from touching queues to avoid contention,
	 * 1 ms should be enough for the ongoing Tx function to finish.
	 */
	rte_delay_ms(1);
	return 0;
}
15731978a9dcSXiao Wang
/*
 * Recover hw state to let the worker threads continue.
 * Counterpart of virtio_dev_pause(): sets hw->started again and
 * releases the state_lock that virtio_dev_pause() left held.
 */
void
virtio_dev_resume(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}
15851978a9dcSXiao Wang
/*
 * Should be called only after device is paused.
 * Transmit @nb_pkts control-path packets (e.g. RARP) on Tx queue 0 by
 * invoking the device's normal tx burst function directly.
 *
 * hw->inject_pkts is set around the call so the Tx path can tell these
 * packets come from the control path; it is cleared before returning.
 *
 * Returns the number of packets actually transmitted.
 */
int
virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
		int nb_pkts)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_tx *txvq = dev->data->tx_queues[0];
	int ret;

	hw->inject_pkts = tx_pkts;
	ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
	hw->inject_pkts = NULL;

	return ret;
}
16031978a9dcSXiao Wang
16047365504fSXiao Wang static void
virtio_notify_peers(struct rte_eth_dev * dev)16057365504fSXiao Wang virtio_notify_peers(struct rte_eth_dev *dev)
16067365504fSXiao Wang {
16077365504fSXiao Wang struct virtio_hw *hw = dev->data->dev_private;
16087c7f2e60SZhiyong Yang struct virtnet_rx *rxvq;
16097365504fSXiao Wang struct rte_mbuf *rarp_mbuf;
16107365504fSXiao Wang
16117c7f2e60SZhiyong Yang if (!dev->data->rx_queues)
16127c7f2e60SZhiyong Yang return;
16137c7f2e60SZhiyong Yang
16147c7f2e60SZhiyong Yang rxvq = dev->data->rx_queues[0];
1615f42deafaSZhiyong Yang if (!rxvq)
1616f42deafaSZhiyong Yang return;
1617f42deafaSZhiyong Yang
16187365504fSXiao Wang rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
16196d13ea8eSOlivier Matz (struct rte_ether_addr *)hw->mac_addr);
16207365504fSXiao Wang if (rarp_mbuf == NULL) {
16217365504fSXiao Wang PMD_DRV_LOG(ERR, "failed to make RARP packet.");
16227365504fSXiao Wang return;
16237365504fSXiao Wang }
16247365504fSXiao Wang
16257365504fSXiao Wang /* If virtio port just stopped, no need to send RARP */
16267365504fSXiao Wang if (virtio_dev_pause(dev) < 0) {
16277365504fSXiao Wang rte_pktmbuf_free(rarp_mbuf);
16287365504fSXiao Wang return;
16297365504fSXiao Wang }
16307365504fSXiao Wang
16317365504fSXiao Wang virtio_inject_pkts(dev, &rarp_mbuf, 1);
16327365504fSXiao Wang virtio_dev_resume(dev);
16337365504fSXiao Wang }
16347365504fSXiao Wang
16357365504fSXiao Wang static void
virtio_ack_link_announce(struct rte_eth_dev * dev)16367365504fSXiao Wang virtio_ack_link_announce(struct rte_eth_dev *dev)
16377365504fSXiao Wang {
16387365504fSXiao Wang struct virtio_hw *hw = dev->data->dev_private;
16397365504fSXiao Wang struct virtio_pmd_ctrl ctrl;
16407365504fSXiao Wang
16417365504fSXiao Wang ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
16427365504fSXiao Wang ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;
16437365504fSXiao Wang
16447365504fSXiao Wang virtio_send_command(hw->cvq, &ctrl, NULL, 0);
16457365504fSXiao Wang }
16467365504fSXiao Wang
/*
 * Process virtio config changed interrupt. Call the callback
 * if link state changed, generate gratuitous RARP packet if
 * the status indicates an ANNOUNCE.
 */
void
virtio_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct virtio_hw *hw = dev->data->dev_private;
	uint8_t isr;
	uint16_t status;

	/* Read interrupt status which clears interrupt */
	isr = virtio_get_isr(hw);
	PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);

	/* Re-arm the interrupt before handling so no edge is lost. */
	if (virtio_intr_unmask(dev) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");

	if (isr & VIRTIO_ISR_CONFIG) {
		/* Link state may have changed: notify the application. */
		if (virtio_dev_link_update(dev, 0) == 0)
			rte_eth_dev_callback_process(dev,
						     RTE_ETH_EVENT_INTR_LSC,
						     NULL);

		if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
			virtio_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&status, sizeof(status));
			/* ANNOUNCE: broadcast RARP, then ack via cvq. */
			if (status & VIRTIO_NET_S_ANNOUNCE) {
				virtio_notify_peers(dev);
				if (hw->cvq)
					virtio_ack_link_announce(dev);
			}
		}
	}
}
16856c3169a3SBruce Richardson
/* set rx and tx handlers according to what is supported */
static void
set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;

	eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
	/* Tx path selection: packed ring (vectorized or standard),
	 * otherwise split ring (in-order or standard).
	 */
	if (virtio_with_packed_queue(hw)) {
		PMD_INIT_LOG(INFO,
			"virtio: using packed ring %s Tx path on port %u",
			hw->use_vec_tx ? "vectorized" : "standard",
			eth_dev->data->port_id);
		if (hw->use_vec_tx)
			eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed_vec;
		else
			eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
	} else {
		if (hw->use_inorder_tx) {
			PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
				eth_dev->data->port_id);
			eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
		} else {
			PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
				eth_dev->data->port_id);
			eth_dev->tx_pkt_burst = virtio_xmit_pkts;
		}
	}

	/* Rx path selection, by ring layout then preference:
	 * vectorized > in-order > mergeable buffers > standard.
	 */
	if (virtio_with_packed_queue(hw)) {
		if (hw->use_vec_rx) {
			PMD_INIT_LOG(INFO,
				"virtio: using packed ring vectorized Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst =
				&virtio_recv_pkts_packed_vec;
		} else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
			PMD_INIT_LOG(INFO,
				"virtio: using packed ring mergeable buffer Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst =
				&virtio_recv_mergeable_pkts_packed;
		} else {
			PMD_INIT_LOG(INFO,
				"virtio: using packed ring standard Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
		}
	} else {
		if (hw->use_vec_rx) {
			PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
		} else if (hw->use_inorder_rx) {
			PMD_INIT_LOG(INFO,
				"virtio: using inorder Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
		} else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
			PMD_INIT_LOG(INFO,
				"virtio: using mergeable buffer Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
		} else {
			PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = &virtio_recv_pkts;
		}
	}

}
17564819eae8SOlivier Matz
/* Only support 1:1 queue/interrupt mapping so far.
 * TODO: support n:1 queue/interrupt mapping when there are limited number of
 * interrupt vectors (<N+1).
 */
static int
virtio_queues_bind_intr(struct rte_eth_dev *dev)
{
	uint32_t i;
	struct virtio_hw *hw = dev->data->dev_private;

	PMD_INIT_LOG(INFO, "queue/interrupt binding");
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		/* Vector 0 is reserved for config-change interrupts, so
		 * Rx queue i gets MSI-X vector i + 1.
		 */
		if (rte_intr_vec_list_index_set(dev->intr_handle, i,
						       i + 1))
			return -rte_errno;
		/* Rx virtqueue of queue pair i is at vqs index i * 2. */
		if (VIRTIO_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
						 VIRTIO_MSI_NO_VECTOR) {
			PMD_DRV_LOG(ERR, "failed to set queue vector");
			return -EBUSY;
		}
	}

	return 0;
}
178126b683b4SJianfeng Tan
17829ebdeefeSJianfeng Tan static void
virtio_queues_unbind_intr(struct rte_eth_dev * dev)17839ebdeefeSJianfeng Tan virtio_queues_unbind_intr(struct rte_eth_dev *dev)
17849ebdeefeSJianfeng Tan {
17859ebdeefeSJianfeng Tan uint32_t i;
17869ebdeefeSJianfeng Tan struct virtio_hw *hw = dev->data->dev_private;
17879ebdeefeSJianfeng Tan
1788f2462150SFerruh Yigit PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
17899ebdeefeSJianfeng Tan for (i = 0; i < dev->data->nb_rx_queues; ++i)
1790f8b60756SMaxime Coquelin VIRTIO_OPS(hw)->set_queue_irq(hw,
17919ebdeefeSJianfeng Tan hw->vqs[i * VTNET_CQ],
17929ebdeefeSJianfeng Tan VIRTIO_MSI_NO_VECTOR);
17939ebdeefeSJianfeng Tan }
17949ebdeefeSJianfeng Tan
/* Set up Rx-queue interrupt (MSI-X) support for the device.
 *
 * Requires multi-vector capability from the interrupt handle; creates one
 * eventfd per Rx queue, allocates the per-queue vector list, enables MSI-X
 * on the device, then binds each queue to its vector.
 *
 * Returns 0 on success, a negative value on failure.
 */
static int
virtio_configure_intr(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (!rte_intr_cap_multiple(dev->intr_handle)) {
		PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
		return -ENOTSUP;
	}

	if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
		PMD_INIT_LOG(ERR, "Fail to create eventfd");
		return -1;
	}

	/* One vector-list entry per queue pair; entries are filled in by
	 * virtio_queues_bind_intr() below.
	 * NOTE(review): this list is not freed on the failure paths further
	 * down — presumably released when the port is closed; confirm there
	 * is no leak on repeated configure failures.
	 */
	if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",
				    hw->max_queue_pairs)) {
		PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
			     hw->max_queue_pairs);
		return -ENOMEM;
	}

	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		/* Re-register callback to update max_intr */
		rte_intr_callback_unregister(dev->intr_handle,
					     virtio_interrupt_handler,
					     dev);
		rte_intr_callback_register(dev->intr_handle,
					   virtio_interrupt_handler,
					   dev);
	}

	/* DO NOT try to remove this! This function will enable msix, or QEMU
	 * will encounter SIGSEGV when DRIVER_OK is sent.
	 * And for legacy devices, this should be done before queue/vec binding
	 * to change the config size from 20 to 24, or VIRTIO_MSI_QUEUE_VECTOR
	 * (22) will be ignored.
	 */
	if (virtio_intr_enable(dev) < 0) {
		PMD_DRV_LOG(ERR, "interrupt enable failed");
		return -1;
	}

	if (virtio_queues_bind_intr(dev) < 0) {
		PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
		return -1;
	}

	return 0;
}
18450c9d6620SMaxime Coquelin
18463c3c54cfSIvan Ilchenko static void
virtio_get_speed_duplex(struct rte_eth_dev * eth_dev,struct rte_eth_link * link)18473c3c54cfSIvan Ilchenko virtio_get_speed_duplex(struct rte_eth_dev *eth_dev,
18483c3c54cfSIvan Ilchenko struct rte_eth_link *link)
18493c3c54cfSIvan Ilchenko {
18503c3c54cfSIvan Ilchenko struct virtio_hw *hw = eth_dev->data->dev_private;
18513c3c54cfSIvan Ilchenko struct virtio_net_config *config;
18523c3c54cfSIvan Ilchenko struct virtio_net_config local_config;
18533c3c54cfSIvan Ilchenko
18543c3c54cfSIvan Ilchenko config = &local_config;
18553c3c54cfSIvan Ilchenko virtio_read_dev_config(hw,
18563c3c54cfSIvan Ilchenko offsetof(struct virtio_net_config, speed),
18573c3c54cfSIvan Ilchenko &config->speed, sizeof(config->speed));
18583c3c54cfSIvan Ilchenko virtio_read_dev_config(hw,
18593c3c54cfSIvan Ilchenko offsetof(struct virtio_net_config, duplex),
18603c3c54cfSIvan Ilchenko &config->duplex, sizeof(config->duplex));
18613c3c54cfSIvan Ilchenko hw->speed = config->speed;
18623c3c54cfSIvan Ilchenko hw->duplex = config->duplex;
18633c3c54cfSIvan Ilchenko if (link != NULL) {
18643c3c54cfSIvan Ilchenko link->link_duplex = hw->duplex;
18653c3c54cfSIvan Ilchenko link->link_speed = hw->speed;
18663c3c54cfSIvan Ilchenko }
18673c3c54cfSIvan Ilchenko PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
18683c3c54cfSIvan Ilchenko hw->speed, hw->duplex);
18693c3c54cfSIvan Ilchenko }
18703c3c54cfSIvan Ilchenko
18710c9d6620SMaxime Coquelin static uint64_t
ethdev_to_virtio_rss_offloads(uint64_t ethdev_hash_types)18720c9d6620SMaxime Coquelin ethdev_to_virtio_rss_offloads(uint64_t ethdev_hash_types)
18730c9d6620SMaxime Coquelin {
18740c9d6620SMaxime Coquelin uint64_t virtio_hash_types = 0;
18750c9d6620SMaxime Coquelin
18760c9d6620SMaxime Coquelin if (ethdev_hash_types & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
18770c9d6620SMaxime Coquelin RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
18780c9d6620SMaxime Coquelin virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IPV4;
18790c9d6620SMaxime Coquelin
18800c9d6620SMaxime Coquelin if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
18810c9d6620SMaxime Coquelin virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCPV4;
18820c9d6620SMaxime Coquelin
18830c9d6620SMaxime Coquelin if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
18840c9d6620SMaxime Coquelin virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDPV4;
18850c9d6620SMaxime Coquelin
18860c9d6620SMaxime Coquelin if (ethdev_hash_types & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
18870c9d6620SMaxime Coquelin RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
18880c9d6620SMaxime Coquelin virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IPV6;
18890c9d6620SMaxime Coquelin
18900c9d6620SMaxime Coquelin if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
18910c9d6620SMaxime Coquelin virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCPV6;
18920c9d6620SMaxime Coquelin
18930c9d6620SMaxime Coquelin if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
18940c9d6620SMaxime Coquelin virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDPV6;
18950c9d6620SMaxime Coquelin
18960c9d6620SMaxime Coquelin if (ethdev_hash_types & RTE_ETH_RSS_IPV6_EX)
18970c9d6620SMaxime Coquelin virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IP_EX;
18980c9d6620SMaxime Coquelin
18990c9d6620SMaxime Coquelin if (ethdev_hash_types & RTE_ETH_RSS_IPV6_TCP_EX)
19000c9d6620SMaxime Coquelin virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCP_EX;
19010c9d6620SMaxime Coquelin
19020c9d6620SMaxime Coquelin if (ethdev_hash_types & RTE_ETH_RSS_IPV6_UDP_EX)
19030c9d6620SMaxime Coquelin virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDP_EX;
19040c9d6620SMaxime Coquelin
19050c9d6620SMaxime Coquelin return virtio_hash_types;
19060c9d6620SMaxime Coquelin }
19070c9d6620SMaxime Coquelin
19080c9d6620SMaxime Coquelin static uint64_t
virtio_to_ethdev_rss_offloads(uint64_t virtio_hash_types)19090c9d6620SMaxime Coquelin virtio_to_ethdev_rss_offloads(uint64_t virtio_hash_types)
19100c9d6620SMaxime Coquelin {
19110c9d6620SMaxime Coquelin uint64_t rss_offloads = 0;
19120c9d6620SMaxime Coquelin
19130c9d6620SMaxime Coquelin if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IPV4)
19140c9d6620SMaxime Coquelin rss_offloads |= RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
19150c9d6620SMaxime Coquelin RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
19160c9d6620SMaxime Coquelin
19170c9d6620SMaxime Coquelin if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCPV4)
19180c9d6620SMaxime Coquelin rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
19190c9d6620SMaxime Coquelin
19200c9d6620SMaxime Coquelin if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDPV4)
19210c9d6620SMaxime Coquelin rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
19220c9d6620SMaxime Coquelin
19230c9d6620SMaxime Coquelin if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IPV6)
19240c9d6620SMaxime Coquelin rss_offloads |= RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
19250c9d6620SMaxime Coquelin RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
19260c9d6620SMaxime Coquelin
19270c9d6620SMaxime Coquelin if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCPV6)
19280c9d6620SMaxime Coquelin rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
19290c9d6620SMaxime Coquelin
19300c9d6620SMaxime Coquelin if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDPV6)
19310c9d6620SMaxime Coquelin rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
19320c9d6620SMaxime Coquelin
19330c9d6620SMaxime Coquelin if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IP_EX)
19340c9d6620SMaxime Coquelin rss_offloads |= RTE_ETH_RSS_IPV6_EX;
19350c9d6620SMaxime Coquelin
19360c9d6620SMaxime Coquelin if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCP_EX)
19370c9d6620SMaxime Coquelin rss_offloads |= RTE_ETH_RSS_IPV6_TCP_EX;
19380c9d6620SMaxime Coquelin
19390c9d6620SMaxime Coquelin if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDP_EX)
19400c9d6620SMaxime Coquelin rss_offloads |= RTE_ETH_RSS_IPV6_UDP_EX;
19410c9d6620SMaxime Coquelin
19420c9d6620SMaxime Coquelin return rss_offloads;
19430c9d6620SMaxime Coquelin }
19440c9d6620SMaxime Coquelin
19450c9d6620SMaxime Coquelin static int
virtio_dev_get_rss_config(struct virtio_hw * hw,uint32_t * rss_hash_types)19460c9d6620SMaxime Coquelin virtio_dev_get_rss_config(struct virtio_hw *hw, uint32_t *rss_hash_types)
19470c9d6620SMaxime Coquelin {
19480c9d6620SMaxime Coquelin struct virtio_net_config local_config;
19490c9d6620SMaxime Coquelin struct virtio_net_config *config = &local_config;
19500c9d6620SMaxime Coquelin
19510c9d6620SMaxime Coquelin virtio_read_dev_config(hw,
19520c9d6620SMaxime Coquelin offsetof(struct virtio_net_config, rss_max_key_size),
19530c9d6620SMaxime Coquelin &config->rss_max_key_size,
19540c9d6620SMaxime Coquelin sizeof(config->rss_max_key_size));
19550c9d6620SMaxime Coquelin if (config->rss_max_key_size < VIRTIO_NET_RSS_KEY_SIZE) {
19560c9d6620SMaxime Coquelin PMD_INIT_LOG(ERR, "Invalid device RSS max key size (%u)",
19570c9d6620SMaxime Coquelin config->rss_max_key_size);
19580c9d6620SMaxime Coquelin return -EINVAL;
19590c9d6620SMaxime Coquelin }
19600c9d6620SMaxime Coquelin
19610c9d6620SMaxime Coquelin virtio_read_dev_config(hw,
19620c9d6620SMaxime Coquelin offsetof(struct virtio_net_config,
19630c9d6620SMaxime Coquelin rss_max_indirection_table_length),
19640c9d6620SMaxime Coquelin &config->rss_max_indirection_table_length,
19650c9d6620SMaxime Coquelin sizeof(config->rss_max_indirection_table_length));
19660c9d6620SMaxime Coquelin if (config->rss_max_indirection_table_length < VIRTIO_NET_RSS_RETA_SIZE) {
19670c9d6620SMaxime Coquelin PMD_INIT_LOG(ERR, "Invalid device RSS max reta size (%u)",
19680c9d6620SMaxime Coquelin config->rss_max_indirection_table_length);
19690c9d6620SMaxime Coquelin return -EINVAL;
19700c9d6620SMaxime Coquelin }
19710c9d6620SMaxime Coquelin
19720c9d6620SMaxime Coquelin virtio_read_dev_config(hw,
19730c9d6620SMaxime Coquelin offsetof(struct virtio_net_config, supported_hash_types),
19740c9d6620SMaxime Coquelin &config->supported_hash_types,
19750c9d6620SMaxime Coquelin sizeof(config->supported_hash_types));
19760c9d6620SMaxime Coquelin if ((config->supported_hash_types & VIRTIO_NET_HASH_TYPE_MASK) == 0) {
19770c9d6620SMaxime Coquelin PMD_INIT_LOG(ERR, "Invalid device RSS hash types (0x%x)",
19780c9d6620SMaxime Coquelin config->supported_hash_types);
19790c9d6620SMaxime Coquelin return -EINVAL;
19800c9d6620SMaxime Coquelin }
19810c9d6620SMaxime Coquelin
19820c9d6620SMaxime Coquelin *rss_hash_types = config->supported_hash_types & VIRTIO_NET_HASH_TYPE_MASK;
19830c9d6620SMaxime Coquelin
19840c9d6620SMaxime Coquelin PMD_INIT_LOG(DEBUG, "Device RSS config:");
19850c9d6620SMaxime Coquelin PMD_INIT_LOG(DEBUG, "\t-Max key size: %u", config->rss_max_key_size);
19860c9d6620SMaxime Coquelin PMD_INIT_LOG(DEBUG, "\t-Max reta size: %u", config->rss_max_indirection_table_length);
19870c9d6620SMaxime Coquelin PMD_INIT_LOG(DEBUG, "\t-Supported hash types: 0x%x", *rss_hash_types);
19880c9d6620SMaxime Coquelin
19890c9d6620SMaxime Coquelin return 0;
19900c9d6620SMaxime Coquelin }
19910c9d6620SMaxime Coquelin
19920c9d6620SMaxime Coquelin static int
virtio_dev_rss_hash_update(struct rte_eth_dev * dev,struct rte_eth_rss_conf * rss_conf)19930c9d6620SMaxime Coquelin virtio_dev_rss_hash_update(struct rte_eth_dev *dev,
19940c9d6620SMaxime Coquelin struct rte_eth_rss_conf *rss_conf)
19950c9d6620SMaxime Coquelin {
19960c9d6620SMaxime Coquelin struct virtio_hw *hw = dev->data->dev_private;
19970c9d6620SMaxime Coquelin char old_rss_key[VIRTIO_NET_RSS_KEY_SIZE];
19980c9d6620SMaxime Coquelin uint32_t old_hash_types;
19990c9d6620SMaxime Coquelin uint16_t nb_queues;
20000c9d6620SMaxime Coquelin int ret;
20010c9d6620SMaxime Coquelin
20020c9d6620SMaxime Coquelin if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
20030c9d6620SMaxime Coquelin return -ENOTSUP;
20040c9d6620SMaxime Coquelin
20050c9d6620SMaxime Coquelin if (rss_conf->rss_hf & ~virtio_to_ethdev_rss_offloads(VIRTIO_NET_HASH_TYPE_MASK))
20060c9d6620SMaxime Coquelin return -EINVAL;
20070c9d6620SMaxime Coquelin
20080c9d6620SMaxime Coquelin old_hash_types = hw->rss_hash_types;
20090c9d6620SMaxime Coquelin hw->rss_hash_types = ethdev_to_virtio_rss_offloads(rss_conf->rss_hf);
20100c9d6620SMaxime Coquelin
20110c9d6620SMaxime Coquelin if (rss_conf->rss_key && rss_conf->rss_key_len) {
20120c9d6620SMaxime Coquelin if (rss_conf->rss_key_len != VIRTIO_NET_RSS_KEY_SIZE) {
20130c9d6620SMaxime Coquelin PMD_INIT_LOG(ERR, "Driver only supports %u RSS key length",
20140c9d6620SMaxime Coquelin VIRTIO_NET_RSS_KEY_SIZE);
20150c9d6620SMaxime Coquelin ret = -EINVAL;
20160c9d6620SMaxime Coquelin goto restore_types;
20170c9d6620SMaxime Coquelin }
20180c9d6620SMaxime Coquelin memcpy(old_rss_key, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
20190c9d6620SMaxime Coquelin memcpy(hw->rss_key, rss_conf->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
20200c9d6620SMaxime Coquelin }
20210c9d6620SMaxime Coquelin
20220c9d6620SMaxime Coquelin nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
20230c9d6620SMaxime Coquelin ret = virtio_set_multiple_queues_rss(dev, nb_queues);
20240c9d6620SMaxime Coquelin if (ret < 0) {
20250c9d6620SMaxime Coquelin PMD_INIT_LOG(ERR, "Failed to apply new RSS config to the device");
20260c9d6620SMaxime Coquelin goto restore_key;
20270c9d6620SMaxime Coquelin }
20280c9d6620SMaxime Coquelin
20290c9d6620SMaxime Coquelin return 0;
20300c9d6620SMaxime Coquelin restore_key:
2031*0f7438e6SYunjian Wang if (rss_conf->rss_key && rss_conf->rss_key_len)
20320c9d6620SMaxime Coquelin memcpy(hw->rss_key, old_rss_key, VIRTIO_NET_RSS_KEY_SIZE);
20330c9d6620SMaxime Coquelin restore_types:
20340c9d6620SMaxime Coquelin hw->rss_hash_types = old_hash_types;
20350c9d6620SMaxime Coquelin
20360c9d6620SMaxime Coquelin return ret;
20370c9d6620SMaxime Coquelin }
20380c9d6620SMaxime Coquelin
20390c9d6620SMaxime Coquelin static int
virtio_dev_rss_hash_conf_get(struct rte_eth_dev * dev,struct rte_eth_rss_conf * rss_conf)20400c9d6620SMaxime Coquelin virtio_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
20410c9d6620SMaxime Coquelin struct rte_eth_rss_conf *rss_conf)
20420c9d6620SMaxime Coquelin {
20430c9d6620SMaxime Coquelin struct virtio_hw *hw = dev->data->dev_private;
20440c9d6620SMaxime Coquelin
20450c9d6620SMaxime Coquelin if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
20460c9d6620SMaxime Coquelin return -ENOTSUP;
20470c9d6620SMaxime Coquelin
20480c9d6620SMaxime Coquelin if (rss_conf->rss_key && rss_conf->rss_key_len >= VIRTIO_NET_RSS_KEY_SIZE)
20490c9d6620SMaxime Coquelin memcpy(rss_conf->rss_key, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
20500c9d6620SMaxime Coquelin rss_conf->rss_key_len = VIRTIO_NET_RSS_KEY_SIZE;
20510c9d6620SMaxime Coquelin rss_conf->rss_hf = virtio_to_ethdev_rss_offloads(hw->rss_hash_types);
20520c9d6620SMaxime Coquelin
20530c9d6620SMaxime Coquelin return 0;
20540c9d6620SMaxime Coquelin }
20550c9d6620SMaxime Coquelin
/* Update the RSS redirection table and push it to the device.
 * The previous table is restored if the device rejects the new one.
 */
static int virtio_dev_rss_reta_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct virtio_hw *hw = dev->data->dev_private;
	uint16_t saved_reta[VIRTIO_NET_RSS_RETA_SIZE];
	uint16_t nb_queues;
	uint16_t i;
	int ret;

	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
		return -ENOTSUP;

	if (reta_size != VIRTIO_NET_RSS_RETA_SIZE)
		return -EINVAL;

	memcpy(saved_reta, hw->rss_reta, sizeof(saved_reta));

	for (i = 0; i < reta_size; i++) {
		unsigned int grp = i / RTE_ETH_RETA_GROUP_SIZE;
		unsigned int bit = i % RTE_ETH_RETA_GROUP_SIZE;

		if (reta_conf[grp].mask & (1ULL << bit))
			hw->rss_reta[i] = reta_conf[grp].reta[bit];
	}

	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	ret = virtio_set_multiple_queues_rss(dev, nb_queues);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to apply new RETA to the device");
		memcpy(hw->rss_reta, saved_reta, sizeof(saved_reta));
	}

	/* NOTE(review): updated even when the device rejected the table —
	 * presumably so a later rss_init does not rebuild the restored
	 * table; confirm this is intentional.
	 */
	hw->rss_rx_queues = dev->data->nb_rx_queues;

	return ret;
}
20940c9d6620SMaxime Coquelin
/* Copy the driver's RSS redirection table into the caller's
 * rte_eth_rss_reta_entry64 array. reta_size must match the fixed
 * driver table size.
 */
static int virtio_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct virtio_hw *hw = dev->data->dev_private;
	uint16_t i;

	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
		return -ENOTSUP;

	if (reta_size != VIRTIO_NET_RSS_RETA_SIZE)
		return -EINVAL;

	for (i = 0; i < reta_size; i++) {
		unsigned int grp = i / RTE_ETH_RETA_GROUP_SIZE;
		unsigned int pos = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[grp].reta[pos] = hw->rss_reta[i];
	}

	return 0;
}
21150c9d6620SMaxime Coquelin
/*
 * As default RSS hash key, it uses the default key of the
 * Intel IXGBE devices. It can be updated by the application
 * with any 40B key value.
 */
static uint8_t rss_intel_key[VIRTIO_NET_RSS_KEY_SIZE] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
21280c9d6620SMaxime Coquelin
/*
 * Initialize the driver's RSS state from the device capabilities and the
 * application-provided rte_eth_rss_conf:
 *  - validates the requested hash types against what the device supports,
 *  - allocates (once) and fills the RSS key (user key or default),
 *  - allocates (once) the redirection table and (re)builds it round-robin
 *    whenever the number of Rx queues has changed.
 * Returns 0 on success, a negative value on error.
 */
static int
virtio_dev_rss_init(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	uint16_t nb_rx_queues = eth_dev->data->nb_rx_queues;
	struct rte_eth_rss_conf *rss_conf;
	int ret, i;

	if (!nb_rx_queues) {
		PMD_INIT_LOG(ERR, "Cannot init RSS if no Rx queues");
		return -EINVAL;
	}

	rss_conf = &eth_dev->data->dev_conf.rx_adv_conf.rss_conf;

	/* Query device limits and supported hash types into hw. */
	ret = virtio_dev_get_rss_config(hw, &hw->rss_hash_types);
	if (ret)
		return ret;

	if (rss_conf->rss_hf) {
		/* Ensure requested hash types are supported by the device */
		if (rss_conf->rss_hf & ~virtio_to_ethdev_rss_offloads(hw->rss_hash_types))
			return -EINVAL;

		hw->rss_hash_types = ethdev_to_virtio_rss_offloads(rss_conf->rss_hf);
	}

	if (!hw->rss_key) {
		/* Setup default RSS key if not already setup by the user */
		hw->rss_key = rte_malloc_socket("rss_key",
				VIRTIO_NET_RSS_KEY_SIZE, 0,
				eth_dev->device->numa_node);
		if (!hw->rss_key) {
			PMD_INIT_LOG(ERR, "Failed to allocate RSS key");
			return -1;
		}
	}

	if (rss_conf->rss_key && rss_conf->rss_key_len) {
		/* Only the exact 40-byte key length is accepted. */
		if (rss_conf->rss_key_len != VIRTIO_NET_RSS_KEY_SIZE) {
			PMD_INIT_LOG(ERR, "Driver only supports %u RSS key length",
					VIRTIO_NET_RSS_KEY_SIZE);
			return -EINVAL;
		}
		memcpy(hw->rss_key, rss_conf->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
	} else {
		/* No user key: fall back to the well-known default key. */
		memcpy(hw->rss_key, rss_intel_key, VIRTIO_NET_RSS_KEY_SIZE);
	}

	if (!hw->rss_reta) {
		/* Setup default RSS reta if not already setup by the user */
		hw->rss_reta = rte_zmalloc_socket("rss_reta",
				VIRTIO_NET_RSS_RETA_SIZE * sizeof(uint16_t), 0,
				eth_dev->device->numa_node);
		if (!hw->rss_reta) {
			PMD_INIT_LOG(ERR, "Failed to allocate RSS reta");
			return -1;
		}

		/* Force the round-robin rebuild below on first allocation. */
		hw->rss_rx_queues = 0;
	}

	/* Re-initialize the RSS reta if the number of RX queues has changed */
	if (hw->rss_rx_queues != nb_rx_queues) {
		for (i = 0; i < VIRTIO_NET_RSS_RETA_SIZE; i++)
			hw->rss_reta[i] = i % nb_rx_queues;
		hw->rss_rx_queues = nb_rx_queues;
	}

	return 0;
}
22000c9d6620SMaxime Coquelin
/* Sentinel the device reports when the duplex state is not known. */
#define DUPLEX_UNKNOWN 0xff
/* reset device and renegotiate features if needed */
/*
 * Reset the device, (re)negotiate features, read the net config space
 * (MAC, status, queue pairs, MTU, RSS), allocate the virtqueues and, when
 * Rx interrupts are requested, set up MSI-X. The status-register sequence
 * (ACK -> DRIVER -> negotiate -> ... -> reinit complete) is order-critical.
 * Returns 0 on success, a negative value on failure.
 */
static int
virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	struct virtio_net_config *config;
	struct virtio_net_config local_config;
	int ret;

	/* Reset the device although not necessary at startup */
	virtio_reset(hw);

	/* Re-init path: drop any queues left from a previous negotiation. */
	if (hw->vqs) {
		virtio_dev_free_mbufs(eth_dev);
		virtio_free_queues(hw);
	}

	/* Tell the host we've noticed this device. */
	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we've known how to drive the device. */
	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	if (virtio_ethdev_negotiate_features(hw, req_features) < 0)
		return -1;

	hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);

	/* If host does not support both status and MSI-X then disable LSC */
	if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->intr_lsc)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	else
		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Setting up rx_header size for the device */
	if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
	    virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
	    virtio_with_packed_queue(hw))
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	/* Copy the permanent MAC address to: virtio_hw */
	virtio_get_hwaddr(hw);
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);
	PMD_INIT_LOG(DEBUG,
		     "PORT MAC: " RTE_ETHER_ADDR_PRT_FMT,
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	/* When no speed was forced by devargs, read it from config space if
	 * the device advertises VIRTIO_NET_F_SPEED_DUPLEX.
	 */
	hw->get_speed_via_feat = hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN &&
			     virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX);
	if (hw->get_speed_via_feat)
		virtio_get_speed_duplex(eth_dev, NULL);
	if (hw->duplex == DUPLEX_UNKNOWN)
		hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
		hw->speed, hw->duplex);
	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
		config = &local_config;

		virtio_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&config->mac, sizeof(config->mac));

		if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
			virtio_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&config->status, sizeof(config->status));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_STATUS is not supported");
			config->status = 0;
		}

		if (virtio_with_feature(hw, VIRTIO_NET_F_MQ) ||
		    virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
			virtio_read_dev_config(hw,
				offsetof(struct virtio_net_config, max_virtqueue_pairs),
				&config->max_virtqueue_pairs,
				sizeof(config->max_virtqueue_pairs));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "Neither VIRTIO_NET_F_MQ nor VIRTIO_NET_F_RSS are supported");
			config->max_virtqueue_pairs = 1;
		}

		hw->max_queue_pairs = config->max_virtqueue_pairs;

		if (virtio_with_feature(hw, VIRTIO_NET_F_MTU)) {
			virtio_read_dev_config(hw,
				offsetof(struct virtio_net_config, mtu),
				&config->mtu,
				sizeof(config->mtu));

			/*
			 * MTU value has already been checked at negotiation
			 * time, but check again in case it has changed since
			 * then, which should not happen.
			 */
			if (config->mtu < RTE_ETHER_MIN_MTU) {
				PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
						config->mtu);
				return -1;
			}

			hw->max_mtu = config->mtu;
			/* Set initial MTU to maximum one supported by vhost */
			eth_dev->data->mtu = config->mtu;

		} else {
			/* No device MTU: derive the maximum from the largest
			 * receivable packet minus L2/VLAN/virtio headers.
			 */
			hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
				VLAN_TAG_LEN - hw->vtnet_hdr_size;
		}

		hw->rss_hash_types = 0;
		if (virtio_with_feature(hw, VIRTIO_NET_F_RSS))
			if (virtio_dev_rss_init(eth_dev))
				return -1;

		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
				config->max_virtqueue_pairs);
		PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
		PMD_INIT_LOG(DEBUG,
			     "PORT MAC: " RTE_ETHER_ADDR_PRT_FMT,
			     config->mac[0], config->mac[1],
			     config->mac[2], config->mac[3],
			     config->mac[4], config->mac[5]);
	} else {
		/* No control queue: single queue pair, derived max MTU. */
		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
		hw->max_queue_pairs = 1;
		hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
			VLAN_TAG_LEN - hw->vtnet_hdr_size;
	}

	ret = virtio_alloc_queues(eth_dev);
	if (ret < 0)
		return ret;

	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		if (virtio_configure_intr(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "failed to configure interrupt");
			virtio_free_queues(hw);
			return -1;
		}
	}

	virtio_reinit_complete(hw);

	return 0;
}
2355198ab336SOlivier Matz
23566d890f8aSYuanhan Liu /*
2357198ab336SOlivier Matz * This function is based on probe() function in virtio_pci.c
2358198ab336SOlivier Matz * It returns 0 on success.
2359198ab336SOlivier Matz */
int
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
	int vectorized = 0;
	int ret;

	/* Refuse to init if the virtio net header does not fit in the
	 * default mbuf headroom.
	 * NOTE(review): presumably the datapath stores the net header in
	 * the headroom in front of the packet data -- confirm in virtio_rxtx.
	 */
	if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
		PMD_INIT_LOG(ERR,
			"Not sufficient headroom required = %d, avail = %d",
			(int)sizeof(struct virtio_net_hdr_mrg_rxbuf),
			RTE_PKTMBUF_HEADROOM);

		return -1;
	}

	eth_dev->dev_ops = &virtio_eth_dev_ops;

	/* Secondary processes share the primary's device state and only
	 * install their own Rx/Tx burst functions.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		set_rxtx_funcs(eth_dev);
		return 0;
	}

	/* Parse the optional "speed" and "vectorized" devargs. */
	ret = virtio_dev_devargs_parse(eth_dev->device->devargs, &speed, &vectorized);
	if (ret < 0)
		return ret;
	hw->speed = speed;
	hw->duplex = DUPLEX_UNKNOWN;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("virtio",
				VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	rte_spinlock_init(&hw->state_lock);

	/* reset device and negotiate default features */
	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
	if (ret < 0)
		goto err_virtio_init;

	/* The "vectorized" devarg only requests the vectorized paths here;
	 * virtio_dev_configure() may still disable them if the negotiated
	 * features or the CPU do not meet the requirements.
	 */
	if (vectorized) {
		if (!virtio_with_packed_queue(hw)) {
			hw->use_vec_rx = 1;
		} else {
#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
			/* Packed-ring vectorized paths exist only for
			 * AVX512 and ARM builds.
			 */
			hw->use_vec_rx = 1;
			hw->use_vec_tx = 1;
#else
			PMD_DRV_LOG(INFO,
				"building environment do not support packed ring vectorized");
#endif
		}
	}

	hw->opened = 1;

	return 0;

err_virtio_init:
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	return ret;
}
24306c3169a3SBruce Richardson
243149119e38SIvan Dyukov static uint32_t
virtio_dev_speed_capa_get(uint32_t speed)243249119e38SIvan Dyukov virtio_dev_speed_capa_get(uint32_t speed)
243349119e38SIvan Dyukov {
243449119e38SIvan Dyukov switch (speed) {
2435295968d1SFerruh Yigit case RTE_ETH_SPEED_NUM_10G:
2436295968d1SFerruh Yigit return RTE_ETH_LINK_SPEED_10G;
2437295968d1SFerruh Yigit case RTE_ETH_SPEED_NUM_20G:
2438295968d1SFerruh Yigit return RTE_ETH_LINK_SPEED_20G;
2439295968d1SFerruh Yigit case RTE_ETH_SPEED_NUM_25G:
2440295968d1SFerruh Yigit return RTE_ETH_LINK_SPEED_25G;
2441295968d1SFerruh Yigit case RTE_ETH_SPEED_NUM_40G:
2442295968d1SFerruh Yigit return RTE_ETH_LINK_SPEED_40G;
2443295968d1SFerruh Yigit case RTE_ETH_SPEED_NUM_50G:
2444295968d1SFerruh Yigit return RTE_ETH_LINK_SPEED_50G;
2445295968d1SFerruh Yigit case RTE_ETH_SPEED_NUM_56G:
2446295968d1SFerruh Yigit return RTE_ETH_LINK_SPEED_56G;
2447295968d1SFerruh Yigit case RTE_ETH_SPEED_NUM_100G:
2448295968d1SFerruh Yigit return RTE_ETH_LINK_SPEED_100G;
2449295968d1SFerruh Yigit case RTE_ETH_SPEED_NUM_200G:
2450295968d1SFerruh Yigit return RTE_ETH_LINK_SPEED_200G;
245149119e38SIvan Dyukov default:
245249119e38SIvan Dyukov return 0;
245349119e38SIvan Dyukov }
245449119e38SIvan Dyukov }
245549119e38SIvan Dyukov
vectorized_check_handler(__rte_unused const char * key,const char * value,void * ret_val)24564710e16aSMarvin Liu static int vectorized_check_handler(__rte_unused const char *key,
24574710e16aSMarvin Liu const char *value, void *ret_val)
24584710e16aSMarvin Liu {
24594710e16aSMarvin Liu if (strcmp(value, "1") == 0)
24604710e16aSMarvin Liu *(int *)ret_val = 1;
24614710e16aSMarvin Liu else
24624710e16aSMarvin Liu *(int *)ret_val = 0;
24634710e16aSMarvin Liu
24644710e16aSMarvin Liu return 0;
24654710e16aSMarvin Liu }
246649119e38SIvan Dyukov
246749119e38SIvan Dyukov #define VIRTIO_ARG_SPEED "speed"
24684710e16aSMarvin Liu #define VIRTIO_ARG_VECTORIZED "vectorized"
246949119e38SIvan Dyukov
2470440f03c2SXiao Wang static int
link_speed_handler(const char * key __rte_unused,const char * value,void * ret_val)247149119e38SIvan Dyukov link_speed_handler(const char *key __rte_unused,
247249119e38SIvan Dyukov const char *value, void *ret_val)
247349119e38SIvan Dyukov {
247449119e38SIvan Dyukov uint32_t val;
247549119e38SIvan Dyukov if (!value || !ret_val)
247649119e38SIvan Dyukov return -EINVAL;
247749119e38SIvan Dyukov val = strtoul(value, NULL, 0);
247849119e38SIvan Dyukov /* validate input */
247949119e38SIvan Dyukov if (virtio_dev_speed_capa_get(val) == 0)
248049119e38SIvan Dyukov return -EINVAL;
248149119e38SIvan Dyukov *(uint32_t *)ret_val = val;
248249119e38SIvan Dyukov
248349119e38SIvan Dyukov return 0;
248449119e38SIvan Dyukov }
248549119e38SIvan Dyukov
248649119e38SIvan Dyukov
248749119e38SIvan Dyukov static int
virtio_dev_devargs_parse(struct rte_devargs * devargs,uint32_t * speed,int * vectorized)248836a7a2e7SMaxime Coquelin virtio_dev_devargs_parse(struct rte_devargs *devargs, uint32_t *speed, int *vectorized)
2489440f03c2SXiao Wang {
2490440f03c2SXiao Wang struct rte_kvargs *kvlist;
2491440f03c2SXiao Wang int ret = 0;
2492440f03c2SXiao Wang
2493440f03c2SXiao Wang if (devargs == NULL)
2494440f03c2SXiao Wang return 0;
2495440f03c2SXiao Wang
2496440f03c2SXiao Wang kvlist = rte_kvargs_parse(devargs->args, NULL);
249749119e38SIvan Dyukov if (kvlist == NULL) {
249849119e38SIvan Dyukov PMD_INIT_LOG(ERR, "error when parsing param");
2499440f03c2SXiao Wang return 0;
250049119e38SIvan Dyukov }
250136a7a2e7SMaxime Coquelin
250249119e38SIvan Dyukov if (speed && rte_kvargs_count(kvlist, VIRTIO_ARG_SPEED) == 1) {
250349119e38SIvan Dyukov ret = rte_kvargs_process(kvlist,
250449119e38SIvan Dyukov VIRTIO_ARG_SPEED,
250549119e38SIvan Dyukov link_speed_handler, speed);
250649119e38SIvan Dyukov if (ret < 0) {
250749119e38SIvan Dyukov PMD_INIT_LOG(ERR, "Failed to parse %s",
250849119e38SIvan Dyukov VIRTIO_ARG_SPEED);
250949119e38SIvan Dyukov goto exit;
251049119e38SIvan Dyukov }
251149119e38SIvan Dyukov }
2512440f03c2SXiao Wang
25134710e16aSMarvin Liu if (vectorized &&
25144710e16aSMarvin Liu rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
25154710e16aSMarvin Liu ret = rte_kvargs_process(kvlist,
25164710e16aSMarvin Liu VIRTIO_ARG_VECTORIZED,
25174710e16aSMarvin Liu vectorized_check_handler, vectorized);
25184710e16aSMarvin Liu if (ret < 0) {
25194710e16aSMarvin Liu PMD_INIT_LOG(ERR, "Failed to parse %s",
25204710e16aSMarvin Liu VIRTIO_ARG_VECTORIZED);
25214710e16aSMarvin Liu goto exit;
25224710e16aSMarvin Liu }
25234710e16aSMarvin Liu }
25244710e16aSMarvin Liu
2525440f03c2SXiao Wang exit:
2526440f03c2SXiao Wang rte_kvargs_free(kvlist);
2527440f03c2SXiao Wang return ret;
2528440f03c2SXiao Wang }
2529440f03c2SXiao Wang
25306e1d9c0cSMaxime Coquelin static uint8_t
rx_offload_enabled(struct virtio_hw * hw)2531db8d6790SMaxime Coquelin rx_offload_enabled(struct virtio_hw *hw)
2532db8d6790SMaxime Coquelin {
2533b4f9a45aSMaxime Coquelin return virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
2534b4f9a45aSMaxime Coquelin virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
2535b4f9a45aSMaxime Coquelin virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
2536db8d6790SMaxime Coquelin }
2537db8d6790SMaxime Coquelin
25386e1d9c0cSMaxime Coquelin static uint8_t
tx_offload_enabled(struct virtio_hw * hw)2539db8d6790SMaxime Coquelin tx_offload_enabled(struct virtio_hw *hw)
2540db8d6790SMaxime Coquelin {
2541b4f9a45aSMaxime Coquelin return virtio_with_feature(hw, VIRTIO_NET_F_CSUM) ||
2542b4f9a45aSMaxime Coquelin virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
2543b4f9a45aSMaxime Coquelin virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
2544db8d6790SMaxime Coquelin }
2545db8d6790SMaxime Coquelin
25466c3169a3SBruce Richardson /*
25476c3169a3SBruce Richardson * Configure virtio device
25486c3169a3SBruce Richardson * It returns 0 on success.
25496c3169a3SBruce Richardson */
static int
virtio_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
	struct virtio_hw *hw = dev->data->dev_private;
	/* Full per-packet overhead in front of the L3 payload:
	 * Ethernet header + VLAN tag + virtio net header.
	 */
	uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
		hw->vtnet_hdr_size;
	uint64_t rx_offloads = rxmode->offloads;
	uint64_t tx_offloads = txmode->offloads;
	uint64_t req_features;
	int ret;

	PMD_INIT_LOG(DEBUG, "configure");
	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;

	/* Only no-MQ and RSS Rx modes are supported. */
	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE && rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(ERR,
			"Unsupported Rx multi queue mode %d",
			rxmode->mq_mode);
		return -EINVAL;
	}

	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
		PMD_DRV_LOG(ERR,
			"Unsupported Tx multi queue mode %d",
			txmode->mq_mode);
		return -EINVAL;
	}

	/* Rx-queue interrupts require re-running device init with the
	 * previously requested features before queues are set up.
	 */
	if (dev->data->dev_conf.intr_conf.rxq) {
		ret = virtio_init_device(dev, hw->req_guest_features);
		if (ret < 0)
			return ret;
	}

	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)
		req_features |= (1ULL << VIRTIO_NET_F_RSS);

	/* Drop the MTU feature when the requested MTU exceeds what the
	 * device advertised as its maximum.
	 */
	if (rxmode->mtu > hw->max_mtu)
		req_features &= ~(1ULL << VIRTIO_NET_F_MTU);

	hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;

	/* Map requested ethdev offloads onto virtio feature bits. */
	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
		req_features |=
			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
			(1ULL << VIRTIO_NET_F_GUEST_TSO6);

	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
		req_features |= (1ULL << VIRTIO_NET_F_CSUM);

	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
		req_features |=
			(1ULL << VIRTIO_NET_F_HOST_TSO4) |
			(1ULL << VIRTIO_NET_F_HOST_TSO6);

	/* if request features changed, reinit the device */
	if (req_features != hw->req_guest_features) {
		ret = virtio_init_device(dev, req_features);
		if (ret < 0)
			return ret;
	}

	/* From here on, check that the device actually granted the
	 * features the requested offloads depend on.
	 */
	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
			!virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
		PMD_DRV_LOG(ERR, "RSS support requested but not supported by the device");
		return -ENOTSUP;
	}

	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
			    RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
		!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
		PMD_DRV_LOG(ERR,
			"rx checksum not available on this host");
		return -ENOTSUP;
	}

	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
		(!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
		 !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
		PMD_DRV_LOG(ERR,
			"Large Receive Offload not available on this host");
		return -ENOTSUP;
	}

	/* start control queue */
	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
		virtio_dev_cq_start(dev);

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
		hw->vlan_strip = 1;

	hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);

	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
	    !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
		PMD_DRV_LOG(ERR,
			    "vlan filtering not available on this host");
		return -ENOTSUP;
	}

	/* Cache offload state so the datapath doesn't re-derive it. */
	hw->has_tx_offload = tx_offload_enabled(hw);
	hw->has_rx_offload = rx_offload_enabled(hw);

	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		/* Enable vector (0) for Link State Interrupt */
		if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) ==
				VIRTIO_MSI_NO_VECTOR) {
			PMD_DRV_LOG(ERR, "failed to set config vector");
			return -EBUSY;
		}

	/* Finally, decide whether the vectorized/in-order datapaths
	 * (possibly requested via devargs) are actually usable given the
	 * negotiated features, enabled offloads, CPU flags and the
	 * EAL-configured max SIMD bitwidth.
	 */
	if (virtio_with_packed_queue(hw)) {
#if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT)
		if ((hw->use_vec_rx || hw->use_vec_tx) &&
		    (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
		     !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
		     !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
		     rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
			PMD_DRV_LOG(INFO,
				"disabled packed ring vectorized path for requirements not met");
			hw->use_vec_rx = 0;
			hw->use_vec_tx = 0;
		}
#elif defined(RTE_ARCH_ARM)
		if ((hw->use_vec_rx || hw->use_vec_tx) &&
		    (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON) ||
		     !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
		     !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
		     rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)) {
			PMD_DRV_LOG(INFO,
				"disabled packed ring vectorized path for requirements not met");
			hw->use_vec_rx = 0;
			hw->use_vec_tx = 0;
		}
#else
		/* No packed-ring vectorized implementation for this arch. */
		hw->use_vec_rx = 0;
		hw->use_vec_tx = 0;
#endif

		if (hw->use_vec_rx) {
			if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
				PMD_DRV_LOG(INFO,
					"disabled packed ring vectorized rx for mrg_rxbuf enabled");
				hw->use_vec_rx = 0;
			}

			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
				PMD_DRV_LOG(INFO,
					"disabled packed ring vectorized rx for TCP_LRO enabled");
				hw->use_vec_rx = 0;
			}
		}
	} else {
		/* Split ring: in-order feature selects the in-order
		 * datapath and excludes the vectorized Rx one.
		 */
		if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER)) {
			hw->use_inorder_tx = 1;
			hw->use_inorder_rx = 1;
			hw->use_vec_rx = 0;
		}

		if (hw->use_vec_rx) {
#if defined RTE_ARCH_ARM
			if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
				PMD_DRV_LOG(INFO,
					"disabled split ring vectorized path for requirement not met");
				hw->use_vec_rx = 0;
			}
#endif
			if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
				PMD_DRV_LOG(INFO,
					"disabled split ring vectorized rx for mrg_rxbuf enabled");
				hw->use_vec_rx = 0;
			}

			/* Split-ring vectorized Rx cannot parse offload
			 * metadata, so any Rx offload disables it.
			 */
			if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
					   RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
					   RTE_ETH_RX_OFFLOAD_TCP_LRO |
					   RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
				PMD_DRV_LOG(INFO,
					"disabled split ring vectorized rx for offloading enabled");
				hw->use_vec_rx = 0;
			}

			if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
				PMD_DRV_LOG(INFO,
					"disabled split ring vectorized rx, max SIMD bitwidth too low");
				hw->use_vec_rx = 0;
			}
		}
	}

	return 0;
}
27496c3169a3SBruce Richardson
27506c3169a3SBruce Richardson
/*
 * Start the device: finish queue setup, re-arm interrupts, notify the
 * backend and install the datapath functions. Returns 0 on success or a
 * negative errno.
 */
static int
virtio_dev_start(struct rte_eth_dev *dev)
{
	uint16_t nb_queues, i;
	struct virtqueue *vq;
	struct virtio_hw *hw = dev->data->dev_private;
	int ret;

	/* Finish the initialization of the queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		ret = virtio_dev_rx_queue_setup_finish(dev, i);
		if (ret < 0)
			return ret;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		ret = virtio_dev_tx_queue_setup_finish(dev, i);
		if (ret < 0)
			return ret;
	}

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc) {
		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
			PMD_DRV_LOG(ERR, "link status not supported by host");
			return -ENOTSUP;
		}
	}

	/* Re-enable the uio/vfio intr/eventfd mapping: although we already
	 * did that in device configure, it could have been unmapped while
	 * the device was stopped.
	 */
	if (dev->data->dev_conf.intr_conf.lsc ||
			dev->data->dev_conf.intr_conf.rxq) {
		virtio_intr_disable(dev);

		/* Setup interrupt callback */
		if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
			rte_intr_callback_register(dev->intr_handle,
						   virtio_interrupt_handler,
						   dev);

		if (virtio_intr_enable(dev) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -EIO;
		}
	}

	/* Notify the backend now: otherwise e.g. a tap backend might
	 * already have stopped its queue due to fullness, and a vhost
	 * backend would have no chance to be woken up.
	 */
	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	if (hw->max_queue_pairs > 1) {
		if (virtio_set_multiple_queues(dev, nb_queues) != 0)
			return -EINVAL;
	}

	PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
		/* Flush the old packets */
		virtqueue_rxvq_flush(vq);
		virtqueue_notify(vq);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
		virtqueue_notify(vq);
	}

	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");

	/* Debug-only dumps of all virtqueues after start. */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
		VIRTQUEUE_DUMP(vq);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
		VIRTQUEUE_DUMP(vq);
	}

	/* Install the Rx/Tx burst functions matching the chosen datapath. */
	set_rxtx_funcs(dev);
	hw->started = 1;

	/* Initialize Link state */
	virtio_dev_link_update(dev, 0);

	return 0;
}
28436c3169a3SBruce Richardson
virtio_dev_free_mbufs(struct rte_eth_dev * dev)28446c3169a3SBruce Richardson static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
28456c3169a3SBruce Richardson {
2846bdb32afbSOlivier Matz struct virtio_hw *hw = dev->data->dev_private;
2847bdb32afbSOlivier Matz uint16_t nr_vq = virtio_get_nr_vq(hw);
2848bdb32afbSOlivier Matz const char *type __rte_unused;
2849bdb32afbSOlivier Matz unsigned int i, mbuf_num = 0;
2850bdb32afbSOlivier Matz struct virtqueue *vq;
28516c3169a3SBruce Richardson struct rte_mbuf *buf;
2852bdb32afbSOlivier Matz int queue_type;
28536c3169a3SBruce Richardson
2854b87c0648SDavid Harton if (hw->vqs == NULL)
2855b87c0648SDavid Harton return;
2856b87c0648SDavid Harton
2857bdb32afbSOlivier Matz for (i = 0; i < nr_vq; i++) {
2858bdb32afbSOlivier Matz vq = hw->vqs[i];
2859bdb32afbSOlivier Matz if (!vq)
2860bdb32afbSOlivier Matz continue;
286101ad44fdSHuawei Xie
2862bdb32afbSOlivier Matz queue_type = virtio_get_queue_type(hw, i);
2863bdb32afbSOlivier Matz if (queue_type == VTNET_RQ)
2864bdb32afbSOlivier Matz type = "rxq";
2865bdb32afbSOlivier Matz else if (queue_type == VTNET_TQ)
2866bdb32afbSOlivier Matz type = "txq";
2867bdb32afbSOlivier Matz else
28683669a1afSOlivier Matz continue;
28693669a1afSOlivier Matz
28706c3169a3SBruce Richardson PMD_INIT_LOG(DEBUG,
2871bdb32afbSOlivier Matz "Before freeing %s[%d] used and unused buf",
2872bdb32afbSOlivier Matz type, i);
2873bdb32afbSOlivier Matz VIRTQUEUE_DUMP(vq);
28746c3169a3SBruce Richardson
2875727411f5SOlivier Matz while ((buf = virtqueue_detach_unused(vq)) != NULL) {
28766c3169a3SBruce Richardson rte_pktmbuf_free(buf);
28776c3169a3SBruce Richardson mbuf_num++;
28786c3169a3SBruce Richardson }
28796c3169a3SBruce Richardson
28806c3169a3SBruce Richardson PMD_INIT_LOG(DEBUG,
2881bdb32afbSOlivier Matz "After freeing %s[%d] used and unused buf",
2882bdb32afbSOlivier Matz type, i);
2883bdb32afbSOlivier Matz VIRTQUEUE_DUMP(vq);
28846c3169a3SBruce Richardson }
28856c3169a3SBruce Richardson
2886bdb32afbSOlivier Matz PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
28876c3169a3SBruce Richardson }
28886c3169a3SBruce Richardson
28899de76dfbSIvan Ilchenko static void
virtio_tx_completed_cleanup(struct rte_eth_dev * dev)28909de76dfbSIvan Ilchenko virtio_tx_completed_cleanup(struct rte_eth_dev *dev)
28919de76dfbSIvan Ilchenko {
28929de76dfbSIvan Ilchenko struct virtio_hw *hw = dev->data->dev_private;
28939de76dfbSIvan Ilchenko struct virtqueue *vq;
28949de76dfbSIvan Ilchenko int qidx;
28959de76dfbSIvan Ilchenko void (*xmit_cleanup)(struct virtqueue *vq, uint16_t nb_used);
28969de76dfbSIvan Ilchenko
28979de76dfbSIvan Ilchenko if (virtio_with_packed_queue(hw)) {
28989de76dfbSIvan Ilchenko if (hw->use_vec_tx)
28999de76dfbSIvan Ilchenko xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
29009de76dfbSIvan Ilchenko else if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
29019de76dfbSIvan Ilchenko xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
29029de76dfbSIvan Ilchenko else
29039de76dfbSIvan Ilchenko xmit_cleanup = &virtio_xmit_cleanup_normal_packed;
29049de76dfbSIvan Ilchenko } else {
29059de76dfbSIvan Ilchenko if (hw->use_inorder_tx)
29069de76dfbSIvan Ilchenko xmit_cleanup = &virtio_xmit_cleanup_inorder;
29079de76dfbSIvan Ilchenko else
29089de76dfbSIvan Ilchenko xmit_cleanup = &virtio_xmit_cleanup;
29099de76dfbSIvan Ilchenko }
29109de76dfbSIvan Ilchenko
29119de76dfbSIvan Ilchenko for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
29129de76dfbSIvan Ilchenko vq = hw->vqs[2 * qidx + VTNET_SQ_TQ_QUEUE_IDX];
29139de76dfbSIvan Ilchenko if (vq != NULL)
29149de76dfbSIvan Ilchenko xmit_cleanup(vq, virtqueue_nused(vq));
29159de76dfbSIvan Ilchenko }
29169de76dfbSIvan Ilchenko }
29179de76dfbSIvan Ilchenko
29186c3169a3SBruce Richardson /*
29196c3169a3SBruce Richardson * Stop device: disable interrupt and mark link down
29206c3169a3SBruce Richardson */
int
virtio_dev_stop(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct rte_eth_link link;
	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;

	PMD_INIT_LOG(DEBUG, "stop");
	dev->data->dev_started = 0;

	/* state_lock guards hw->started; presumably also taken by the
	 * link/interrupt paths that read it -- see other users of the lock.
	 */
	rte_spinlock_lock(&hw->state_lock);
	if (!hw->started)
		goto out_unlock;	/* already stopped, nothing to do */
	hw->started = 0;

	/* Reclaim descriptors of already-completed Tx packets. */
	virtio_tx_completed_cleanup(dev);

	if (intr_conf->lsc || intr_conf->rxq) {
		virtio_intr_disable(dev);

		/* Reset interrupt callback */
		if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
			rte_intr_callback_unregister(dev->intr_handle,
						     virtio_interrupt_handler,
						     dev);
		}
	}

	/* Report the link as down while the port is stopped. */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
out_unlock:
	rte_spinlock_unlock(&hw->state_lock);

	return 0;
}
29566c3169a3SBruce Richardson
/*
 * ethdev link_update callback: derive the link state from the driver
 * state and, when VIRTIO_NET_F_STATUS was negotiated, from the device's
 * config-space status field. wait_to_complete is ignored.
 */
static int
virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct rte_eth_link link;
	uint16_t status;
	struct virtio_hw *hw = dev->data->dev_private;

	memset(&link, 0, sizeof(link));
	link.link_duplex = hw->duplex;
	link.link_speed = hw->speed;
	link.link_autoneg = RTE_ETH_LINK_AUTONEG;

	if (!hw->started) {
		/* Port not started: always report link down. */
		link.link_status = RTE_ETH_LINK_DOWN;
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	} else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
		/* Device exposes link status in its config space. */
		PMD_INIT_LOG(DEBUG, "Get link status from hw");
		virtio_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&status, sizeof(status));
		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
			link.link_status = RTE_ETH_LINK_DOWN;
			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
			PMD_INIT_LOG(DEBUG, "Port %d is down",
				     dev->data->port_id);
		} else {
			link.link_status = RTE_ETH_LINK_UP;
			/* Optionally query speed/duplex from the device. */
			if (hw->get_speed_via_feat)
				virtio_get_speed_duplex(dev, &link);
			PMD_INIT_LOG(DEBUG, "Port %d is up",
				     dev->data->port_id);
		}
	} else {
		/* No status feature: assume the link is up once started. */
		link.link_status = RTE_ETH_LINK_UP;
		if (hw->get_speed_via_feat)
			virtio_get_speed_duplex(dev, &link);
	}

	return rte_eth_linkstatus_set(dev, &link);
}
29976c3169a3SBruce Richardson
2998289ba0c0SDavid Harton static int
virtio_dev_vlan_offload_set(struct rte_eth_dev * dev,int mask)2999289ba0c0SDavid Harton virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3000289ba0c0SDavid Harton {
3001289ba0c0SDavid Harton const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
3002289ba0c0SDavid Harton struct virtio_hw *hw = dev->data->dev_private;
30039c7ce8bdSTiwei Bie uint64_t offloads = rxmode->offloads;
3004289ba0c0SDavid Harton
3005295968d1SFerruh Yigit if (mask & RTE_ETH_VLAN_FILTER_MASK) {
3006295968d1SFerruh Yigit if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
3007b4f9a45aSMaxime Coquelin !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
3008289ba0c0SDavid Harton
3009289ba0c0SDavid Harton PMD_DRV_LOG(NOTICE,
3010289ba0c0SDavid Harton "vlan filtering not available on this host");
3011289ba0c0SDavid Harton
3012289ba0c0SDavid Harton return -ENOTSUP;
3013289ba0c0SDavid Harton }
3014289ba0c0SDavid Harton }
3015289ba0c0SDavid Harton
3016295968d1SFerruh Yigit if (mask & RTE_ETH_VLAN_STRIP_MASK)
3017295968d1SFerruh Yigit hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
3018289ba0c0SDavid Harton
3019289ba0c0SDavid Harton return 0;
3020289ba0c0SDavid Harton }
3021289ba0c0SDavid Harton
3022bdad90d1SIvan Ilchenko static int
virtio_dev_info_get(struct rte_eth_dev * dev,struct rte_eth_dev_info * dev_info)30236c3169a3SBruce Richardson virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
30246c3169a3SBruce Richardson {
3025c1e55ed3SOlivier Matz uint64_t tso_mask, host_features;
30260c9d6620SMaxime Coquelin uint32_t rss_hash_types = 0;
30276c3169a3SBruce Richardson struct virtio_hw *hw = dev->data->dev_private;
302849119e38SIvan Dyukov dev_info->speed_capa = virtio_dev_speed_capa_get(hw->speed);
3029b392e987SIdo Barnea
303045e4acd4SOlivier Matz dev_info->max_rx_queues =
303145e4acd4SOlivier Matz RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
303245e4acd4SOlivier Matz dev_info->max_tx_queues =
303345e4acd4SOlivier Matz RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
30346c3169a3SBruce Richardson dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
30356c3169a3SBruce Richardson dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
30366c3169a3SBruce Richardson dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
303711d7bc9fSIvan Ilchenko dev_info->max_mtu = hw->max_mtu;
303858169a9cSOlivier Matz
3039f8b60756SMaxime Coquelin host_features = VIRTIO_OPS(hw)->get_features(hw);
3040295968d1SFerruh Yigit dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
30414e8169ebSIvan Ilchenko if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
3042295968d1SFerruh Yigit dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
3043c1e55ed3SOlivier Matz if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
3044c1e55ed3SOlivier Matz dev_info->rx_offload_capa |=
3045295968d1SFerruh Yigit RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
3046295968d1SFerruh Yigit RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
3047c1e55ed3SOlivier Matz }
30489c7ce8bdSTiwei Bie if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
3049295968d1SFerruh Yigit dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3050c1e55ed3SOlivier Matz tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
3051c1e55ed3SOlivier Matz (1ULL << VIRTIO_NET_F_GUEST_TSO6);
3052ec9f3d12SOlivier Matz if ((host_features & tso_mask) == tso_mask)
3053295968d1SFerruh Yigit dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
3054c1e55ed3SOlivier Matz
3055295968d1SFerruh Yigit dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
3056295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
30574174a7b5SMaxime Coquelin if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
305858169a9cSOlivier Matz dev_info->tx_offload_capa |=
3059295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
3060295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
306158169a9cSOlivier Matz }
306269657304SOlivier Matz tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
306369657304SOlivier Matz (1ULL << VIRTIO_NET_F_HOST_TSO6);
30644174a7b5SMaxime Coquelin if ((host_features & tso_mask) == tso_mask)
3065295968d1SFerruh Yigit dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
3066bdad90d1SIvan Ilchenko
30670c9d6620SMaxime Coquelin if (host_features & (1ULL << VIRTIO_NET_F_RSS)) {
30680c9d6620SMaxime Coquelin virtio_dev_get_rss_config(hw, &rss_hash_types);
30690c9d6620SMaxime Coquelin dev_info->hash_key_size = VIRTIO_NET_RSS_KEY_SIZE;
30700c9d6620SMaxime Coquelin dev_info->reta_size = VIRTIO_NET_RSS_RETA_SIZE;
30710c9d6620SMaxime Coquelin dev_info->flow_type_rss_offloads =
30720c9d6620SMaxime Coquelin virtio_to_ethdev_rss_offloads(rss_hash_types);
30730c9d6620SMaxime Coquelin } else {
30740c9d6620SMaxime Coquelin dev_info->hash_key_size = 0;
30750c9d6620SMaxime Coquelin dev_info->reta_size = 0;
30760c9d6620SMaxime Coquelin dev_info->flow_type_rss_offloads = 0;
30770c9d6620SMaxime Coquelin }
30780c9d6620SMaxime Coquelin
307931136836SIvan Ilchenko if (host_features & (1ULL << VIRTIO_F_RING_PACKED)) {
308031136836SIvan Ilchenko /*
308131136836SIvan Ilchenko * According to 2.7 Packed Virtqueues,
308231136836SIvan Ilchenko * 2.7.10.1 Structure Size and Alignment:
308331136836SIvan Ilchenko * The Queue Size value does not have to be a power of 2.
308431136836SIvan Ilchenko */
308531136836SIvan Ilchenko dev_info->rx_desc_lim.nb_max = UINT16_MAX;
3086492a239cSIvan Ilchenko dev_info->tx_desc_lim.nb_max = UINT16_MAX;
308731136836SIvan Ilchenko } else {
308831136836SIvan Ilchenko /*
308931136836SIvan Ilchenko * According to 2.6 Split Virtqueues:
309031136836SIvan Ilchenko * Queue Size value is always a power of 2. The maximum Queue
309131136836SIvan Ilchenko * Size value is 32768.
309231136836SIvan Ilchenko */
309331136836SIvan Ilchenko dev_info->rx_desc_lim.nb_max = 32768;
3094492a239cSIvan Ilchenko dev_info->tx_desc_lim.nb_max = 32768;
309531136836SIvan Ilchenko }
309631136836SIvan Ilchenko /*
309731136836SIvan Ilchenko * Actual minimum is not the same for virtqueues of different kinds,
309831136836SIvan Ilchenko * but to avoid tangling the code with separate branches, rely on
309931136836SIvan Ilchenko * default thresholds since desc number must be at least of their size.
310031136836SIvan Ilchenko */
310131136836SIvan Ilchenko dev_info->rx_desc_lim.nb_min = RTE_MAX(DEFAULT_RX_FREE_THRESH,
310231136836SIvan Ilchenko RTE_VIRTIO_VPMD_RX_REARM_THRESH);
3103492a239cSIvan Ilchenko dev_info->tx_desc_lim.nb_min = DEFAULT_TX_FREE_THRESH;
310431136836SIvan Ilchenko dev_info->rx_desc_lim.nb_align = 1;
3105492a239cSIvan Ilchenko dev_info->tx_desc_lim.nb_align = 1;
310631136836SIvan Ilchenko
3107bdad90d1SIvan Ilchenko return 0;
31086c3169a3SBruce Richardson }
31096c3169a3SBruce Richardson
31106c3169a3SBruce Richardson /*
31116c3169a3SBruce Richardson * It enables testpmd to collect per queue stats.
31126c3169a3SBruce Richardson */
31136c3169a3SBruce Richardson static int
virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev * eth_dev,__rte_unused uint16_t queue_id,__rte_unused uint8_t stat_idx,__rte_unused uint8_t is_rx)31146c3169a3SBruce Richardson virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
31156c3169a3SBruce Richardson __rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
31166c3169a3SBruce Richardson __rte_unused uint8_t is_rx)
31176c3169a3SBruce Richardson {
31186c3169a3SBruce Richardson return 0;
31196c3169a3SBruce Richardson }
31206c3169a3SBruce Richardson
/* Register the driver's log types (default level NOTICE). */
RTE_LOG_REGISTER_SUFFIX(virtio_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_logtype_driver, driver, NOTICE);