1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
3 */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15
16 #include <rte_interrupts.h>
17 #include <rte_debug.h>
18 #include <rte_pci.h>
19 #include <rte_alarm.h>
20 #include <rte_atomic.h>
21 #include <rte_eal.h>
22 #include <rte_ether.h>
23 #include <ethdev_driver.h>
24 #include <ethdev_pci.h>
25 #include <rte_malloc.h>
26 #include <rte_memzone.h>
27 #include <rte_dev.h>
28
29 #include "iavf.h"
30 #include "iavf_rxtx.h"
31 #include "iavf_generic_flow.h"
32 #include "rte_pmd_iavf.h"
33 #include "iavf_ipsec_crypto.h"
34
35 /* devargs */
36 #define IAVF_PROTO_XTR_ARG "proto_xtr"
37 #define IAVF_QUANTA_SIZE_ARG "quanta_size"
38
39 uint64_t iavf_timestamp_dynflag;
40 int iavf_timestamp_dynfield_offset = -1;
41
42 static const char * const iavf_valid_args[] = {
43 IAVF_PROTO_XTR_ARG,
44 IAVF_QUANTA_SIZE_ARG,
45 NULL
46 };
47
48 static const struct rte_mbuf_dynfield iavf_proto_xtr_metadata_param = {
49 .name = "intel_pmd_dynfield_proto_xtr_metadata",
50 .size = sizeof(uint32_t),
51 .align = __alignof__(uint32_t),
52 .flags = 0,
53 };
54
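/* Protocol extraction offload descriptor: the dynflag registration
 * parameters, the associated mbuf ol_flag bit, and whether the extraction
 * is required by the configured queues.
 */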
55 struct iavf_proto_xtr_ol {
56 const struct rte_mbuf_dynflag param;
57 uint64_t *ol_flag;
58 bool required;
59 };
60
61 static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
62 [IAVF_PROTO_XTR_VLAN] = {
63 .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
64 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_vlan_mask },
65 [IAVF_PROTO_XTR_IPV4] = {
66 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
67 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask },
68 [IAVF_PROTO_XTR_IPV6] = {
69 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
70 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask },
71 [IAVF_PROTO_XTR_IPV6_FLOW] = {
72 .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
73 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask },
74 [IAVF_PROTO_XTR_TCP] = {
75 .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
76 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_tcp_mask },
77 [IAVF_PROTO_XTR_IP_OFFSET] = {
78 .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
79 .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
80 [IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
81 .param = {
82 .name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
83 .ol_flag =
84 &rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
85 };
86
87 static int iavf_dev_configure(struct rte_eth_dev *dev);
88 static int iavf_dev_start(struct rte_eth_dev *dev);
89 static int iavf_dev_stop(struct rte_eth_dev *dev);
90 static int iavf_dev_close(struct rte_eth_dev *dev);
91 static int iavf_dev_reset(struct rte_eth_dev *dev);
92 static int iavf_dev_info_get(struct rte_eth_dev *dev,
93 struct rte_eth_dev_info *dev_info);
94 static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
95 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
96 struct rte_eth_stats *stats);
97 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
98 static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
99 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
100 struct rte_eth_xstat *xstats, unsigned int n);
101 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
102 struct rte_eth_xstat_name *xstats_names,
103 unsigned int limit);
104 static int iavf_dev_promiscuous_enable(struct rte_eth_dev *dev);
105 static int iavf_dev_promiscuous_disable(struct rte_eth_dev *dev);
106 static int iavf_dev_allmulticast_enable(struct rte_eth_dev *dev);
107 static int iavf_dev_allmulticast_disable(struct rte_eth_dev *dev);
108 static int iavf_dev_add_mac_addr(struct rte_eth_dev *dev,
109 struct rte_ether_addr *addr,
110 uint32_t index,
111 uint32_t pool);
112 static void iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
113 static int iavf_dev_vlan_filter_set(struct rte_eth_dev *dev,
114 uint16_t vlan_id, int on);
115 static int iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
116 static int iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
117 struct rte_eth_rss_reta_entry64 *reta_conf,
118 uint16_t reta_size);
119 static int iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
120 struct rte_eth_rss_reta_entry64 *reta_conf,
121 uint16_t reta_size);
122 static int iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
123 struct rte_eth_rss_conf *rss_conf);
124 static int iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
125 struct rte_eth_rss_conf *rss_conf);
126 static int iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
127 static int iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
128 struct rte_ether_addr *mac_addr);
129 static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
130 uint16_t queue_id);
131 static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
132 uint16_t queue_id);
133 static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
134 const struct rte_flow_ops **ops);
135 static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
136 struct rte_ether_addr *mc_addrs,
137 uint32_t mc_addrs_num);
138 static int iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
139
140 static const struct rte_pci_id pci_id_iavf_map[] = {
141 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
142 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
143 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
144 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
145 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
146 { .vendor_id = 0, /* sentinel */ },
147 };
148
149 struct rte_iavf_xstats_name_off {
150 char name[RTE_ETH_XSTATS_NAME_SIZE];
151 unsigned int offset;
152 };
153
154 #define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
155 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
156 {"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
157 {"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
158 {"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
159 {"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
160 {"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
161 {"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
162 rx_unknown_protocol)},
163 {"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
164 {"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
165 {"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
166 {"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
167 {"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
168 {"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
169
170 {"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
171 {"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
172 {"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
173 {"inline_ipsec_crypto_ierrors_sad_lookup",
174 _OFF_OF(ips_stats.ierrors.sad_miss)},
175 {"inline_ipsec_crypto_ierrors_not_processed",
176 _OFF_OF(ips_stats.ierrors.not_processed)},
177 {"inline_ipsec_crypto_ierrors_icv_fail",
178 _OFF_OF(ips_stats.ierrors.icv_check)},
179 {"inline_ipsec_crypto_ierrors_length",
180 _OFF_OF(ips_stats.ierrors.ipsec_length)},
181 {"inline_ipsec_crypto_ierrors_misc",
182 _OFF_OF(ips_stats.ierrors.misc)},
183 };
184 #undef _OFF_OF
185
186 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
187 sizeof(rte_iavf_stats_strings[0]))
188
189 static const struct eth_dev_ops iavf_eth_dev_ops = {
190 .dev_configure = iavf_dev_configure,
191 .dev_start = iavf_dev_start,
192 .dev_stop = iavf_dev_stop,
193 .dev_close = iavf_dev_close,
194 .dev_reset = iavf_dev_reset,
195 .dev_infos_get = iavf_dev_info_get,
196 .dev_supported_ptypes_get = iavf_dev_supported_ptypes_get,
197 .link_update = iavf_dev_link_update,
198 .stats_get = iavf_dev_stats_get,
199 .stats_reset = iavf_dev_stats_reset,
200 .xstats_get = iavf_dev_xstats_get,
201 .xstats_get_names = iavf_dev_xstats_get_names,
202 .xstats_reset = iavf_dev_xstats_reset,
203 .promiscuous_enable = iavf_dev_promiscuous_enable,
204 .promiscuous_disable = iavf_dev_promiscuous_disable,
205 .allmulticast_enable = iavf_dev_allmulticast_enable,
206 .allmulticast_disable = iavf_dev_allmulticast_disable,
207 .mac_addr_add = iavf_dev_add_mac_addr,
208 .mac_addr_remove = iavf_dev_del_mac_addr,
209 .set_mc_addr_list = iavf_set_mc_addr_list,
210 .vlan_filter_set = iavf_dev_vlan_filter_set,
211 .vlan_offload_set = iavf_dev_vlan_offload_set,
212 .rx_queue_start = iavf_dev_rx_queue_start,
213 .rx_queue_stop = iavf_dev_rx_queue_stop,
214 .tx_queue_start = iavf_dev_tx_queue_start,
215 .tx_queue_stop = iavf_dev_tx_queue_stop,
216 .rx_queue_setup = iavf_dev_rx_queue_setup,
217 .rx_queue_release = iavf_dev_rx_queue_release,
218 .tx_queue_setup = iavf_dev_tx_queue_setup,
219 .tx_queue_release = iavf_dev_tx_queue_release,
220 .mac_addr_set = iavf_dev_set_default_mac_addr,
221 .reta_update = iavf_dev_rss_reta_update,
222 .reta_query = iavf_dev_rss_reta_query,
223 .rss_hash_update = iavf_dev_rss_hash_update,
224 .rss_hash_conf_get = iavf_dev_rss_hash_conf_get,
225 .rxq_info_get = iavf_dev_rxq_info_get,
226 .txq_info_get = iavf_dev_txq_info_get,
227 .mtu_set = iavf_dev_mtu_set,
228 .rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
229 .rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
230 .flow_ops_get = iavf_dev_flow_ops_get,
231 .tx_done_cleanup = iavf_dev_tx_done_cleanup,
232 .get_monitor_addr = iavf_get_monitor_addr,
233 .tm_ops_get = iavf_tm_ops_get,
234 };
235
236 static int
237 iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
238 void *arg)
239 {
240 if (!arg)
241 return -EINVAL;
242
243 *(const void **)arg = &iavf_tm_ops;
244
245 return 0;
246 }
247
248 __rte_unused
249 static int
250 iavf_vfr_inprogress(struct iavf_hw *hw)
251 {
252 int inprogress = 0;
253
254 if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
255 IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
256 VIRTCHNL_VFR_INPROGRESS)
257 inprogress = 1;
258
259 if (inprogress)
260 PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
261
262 return inprogress;
263 }
264
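/* Device watchdog callback: polls VFGEN_RSTAT to detect a VF reset (VFLR).
 * On detection it marks the VF as in reset and notifies the application
 * with RTE_ETH_EVENT_INTR_RESET; once the reset completes it clears the
 * flag again. The alarm re-arms itself every IAVF_DEV_WATCHDOG_PERIOD.
 */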
265 __rte_unused
266 static void
267 iavf_dev_watchdog(void *cb_arg)
268 {
269 struct iavf_adapter *adapter = cb_arg;
270 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
271 int vfr_inprogress = 0, rc = 0;
272
273 /* check if watchdog has been disabled since last call */
274 if (!adapter->vf.watchdog_enabled)
275 return;
276
277 /* If in reset then poll vfr_inprogress register for completion */
278 if (adapter->vf.vf_reset) {
279 vfr_inprogress = iavf_vfr_inprogress(hw);
280
281 if (!vfr_inprogress) {
282 PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
283 adapter->vf.eth_dev->data->name);
284 adapter->vf.vf_reset = false;
285 }
286 /* If not in reset then poll vfr_inprogress register for VFLR event */
287 } else {
288 vfr_inprogress = iavf_vfr_inprogress(hw);
289
290 if (vfr_inprogress) {
291 PMD_DRV_LOG(INFO,
292 "VF \"%s\" reset event detected by watchdog",
293 adapter->vf.eth_dev->data->name);
294
295 /* enter reset state with VFLR event */
296 adapter->vf.vf_reset = true;
297
298 rte_eth_dev_callback_process(adapter->vf.eth_dev,
299 RTE_ETH_EVENT_INTR_RESET, NULL);
300 }
301 }
302
303 /* re-alarm watchdog */
304 rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
305 &iavf_dev_watchdog, cb_arg);
306
307 if (rc)
308 PMD_DRV_LOG(ERR, "Failed to reset device watchdog alarm for \"%s\"",
309 adapter->vf.eth_dev->data->name);
310 }
311
312 static void
313 iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
314 {
315 #if (IAVF_DEV_WATCHDOG_PERIOD > 0)
316 PMD_DRV_LOG(INFO, "Enabling device watchdog");
317 adapter->vf.watchdog_enabled = true;
318 if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
319 &iavf_dev_watchdog, (void *)adapter))
320 PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
321 #endif
322 }
323
324 static void
325 iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
326 {
327 #if (IAVF_DEV_WATCHDOG_PERIOD > 0)
328 PMD_DRV_LOG(INFO, "Disabling device watchdog");
329 adapter->vf.watchdog_enabled = false;
330 #endif
331 }
332
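/* Replace the multicast address list: flush the previously programmed
 * addresses, then add the new ones. If adding the new list fails, the old
 * list is restored.
 */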
333 static int
334 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
335 struct rte_ether_addr *mc_addrs,
336 uint32_t mc_addrs_num)
337 {
338 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
339 struct iavf_adapter *adapter =
340 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
341 int err, ret;
342
343 if (mc_addrs_num > IAVF_NUM_MACADDR_MAX) {
344 PMD_DRV_LOG(ERR,
345 "can't add more than a limited number (%u) of addresses.",
346 (uint32_t)IAVF_NUM_MACADDR_MAX);
347 return -EINVAL;
348 }
349
350 /* flush previous addresses */
351 err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
352 false);
353 if (err)
354 return err;
355
356 /* add new ones */
357 err = iavf_add_del_mc_addr_list(adapter, mc_addrs, mc_addrs_num, true);
358
359 if (err) {
360 /* if adding the MAC address list fails, add the previous
361 * addresses back.
362 */
363 ret = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs,
364 vf->mc_addrs_num, true);
365 if (ret)
366 return ret;
367 } else {
368 vf->mc_addrs_num = mc_addrs_num;
369 memcpy(vf->mc_addrs,
370 mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
371 }
372
373 return err;
374 }
375
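/* Translate the ethdev rss_hf bits into the HENA (hash enable) bitmap
 * understood by the PF and program it via virtchnl. Unsupported bits are
 * reported with a warning and dropped from vf->rss_hf.
 */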
376 static void
377 iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
378 {
379 static const uint64_t map_hena_rss[] = {
380 /* IPv4 */
381 [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
382 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
383 [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
384 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
385 [IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
386 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
387 [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
388 RTE_ETH_RSS_NONFRAG_IPV4_TCP,
389 [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
390 RTE_ETH_RSS_NONFRAG_IPV4_TCP,
391 [IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
392 RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
393 [IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
394 RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
395 [IAVF_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
396
397 /* IPv6 */
398 [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
399 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
400 [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
401 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
402 [IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
403 RTE_ETH_RSS_NONFRAG_IPV6_UDP,
404 [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
405 RTE_ETH_RSS_NONFRAG_IPV6_TCP,
406 [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
407 RTE_ETH_RSS_NONFRAG_IPV6_TCP,
408 [IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
409 RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
410 [IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
411 RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
412 [IAVF_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
413
414 /* L2 Payload */
415 [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
416 };
417
418 const uint64_t ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
419 RTE_ETH_RSS_NONFRAG_IPV4_TCP |
420 RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
421 RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
422 RTE_ETH_RSS_FRAG_IPV4;
423
424 const uint64_t ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
425 RTE_ETH_RSS_NONFRAG_IPV6_TCP |
426 RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
427 RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
428 RTE_ETH_RSS_FRAG_IPV6;
429
430 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
431 uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
432 uint32_t i;
433 int ret;
434
435 ret = iavf_get_hena_caps(adapter, &caps);
436 if (ret) {
437 /**
438 * RSS offload type configuration is not a necessary feature
439 * for VF, so here just print a warning and return.
440 */
441 PMD_DRV_LOG(WARNING,
442 "fail to get RSS offload type caps, ret: %d", ret);
443 return;
444 }
445
446 /**
447 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
448 * generalizations of all other IPv4 and IPv6 RSS types.
449 */
450 if (rss_hf & RTE_ETH_RSS_IPV4)
451 rss_hf |= ipv4_rss;
452
453 if (rss_hf & RTE_ETH_RSS_IPV6)
454 rss_hf |= ipv6_rss;
455
456 RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
457
458 for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
459 uint64_t bit = BIT_ULL(i);
460
461 if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
462 valid_rss_hf |= map_hena_rss[i];
463 hena |= bit;
464 }
465 }
466
467 ret = iavf_set_hena(adapter, hena);
468 if (ret) {
469 /**
470 * RSS offload type configuration is not a necessary feature
471 * for VF, so here just print a warning and return.
472 */
473 PMD_DRV_LOG(WARNING,
474 "fail to set RSS offload types, ret: %d", ret);
475 return;
476 }
477
478 if (valid_rss_hf & ipv4_rss)
479 valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
480
481 if (valid_rss_hf & ipv6_rss)
482 valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
483
484 if (rss_hf & ~valid_rss_hf)
485 PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
486 rss_hf & ~valid_rss_hf);
487
488 vf->rss_hf = valid_rss_hf;
489 }
490
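/* Initialize RSS: use the configured hash key (or generate a random default),
 * spread the LUT entries evenly across the enabled Rx queues, and push the
 * key, LUT and hash configuration to the PF via virtchnl.
 */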
491 static int
492 iavf_init_rss(struct iavf_adapter *adapter)
493 {
494 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
495 struct rte_eth_rss_conf *rss_conf;
496 uint16_t i, j, nb_q;
497 int ret;
498
499 rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
500 nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
501 vf->max_rss_qregion);
502
503 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
504 PMD_DRV_LOG(DEBUG, "RSS is not supported");
505 return -ENOTSUP;
506 }
507
508 /* configure RSS key */
509 if (!rss_conf->rss_key) {
510 /* Calculate the default hash key */
511 for (i = 0; i < vf->vf_res->rss_key_size; i++)
512 vf->rss_key[i] = (uint8_t)rte_rand();
513 } else
514 rte_memcpy(vf->rss_key, rss_conf->rss_key,
515 RTE_MIN(rss_conf->rss_key_len,
516 vf->vf_res->rss_key_size));
517
518 /* init RSS LUT table */
519 for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
520 if (j >= nb_q)
521 j = 0;
522 vf->rss_lut[i] = j;
523 }
524 /* send virtchnl ops to configure RSS */
525 ret = iavf_configure_rss_lut(adapter);
526 if (ret)
527 return ret;
528 ret = iavf_configure_rss_key(adapter);
529 if (ret)
530 return ret;
531
532 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
533 /* Set RSS hash configuration based on rss_conf->rss_hf. */
534 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
535 if (ret) {
536 PMD_DRV_LOG(ERR, "fail to set default RSS");
537 return ret;
538 }
539 } else {
540 iavf_config_rss_hf(adapter, rss_conf->rss_hf);
541 }
542
543 return 0;
544 }
545
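/* Request a new number of queue pairs from the PF and reset the VF so the
 * new allocation takes effect.
 */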
546 static int
547 iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
548 {
549 struct iavf_adapter *ad =
550 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
551 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
552 int ret;
553
554 ret = iavf_request_queues(dev, num);
555 if (ret) {
556 PMD_DRV_LOG(ERR, "request queues from PF failed");
557 return ret;
558 }
559 PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
560 vf->vsi_res->num_queue_pairs, num);
561
562 ret = iavf_dev_reset(dev);
563 if (ret) {
564 PMD_DRV_LOG(ERR, "vf reset failed");
565 return ret;
566 }
567
568 return 0;
569 }
570
571 static int
572 iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
573 {
574 struct iavf_adapter *adapter =
575 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
576 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
577 bool enable;
578
579 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2))
580 return 0;
581
582 enable = !!(dev->data->dev_conf.txmode.offloads &
583 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
584 iavf_config_vlan_insert_v2(adapter, enable);
585
586 return 0;
587 }
588
589 static int
590 iavf_dev_init_vlan(struct rte_eth_dev *dev)
591 {
592 int err;
593
594 err = iavf_dev_vlan_offload_set(dev,
595 RTE_ETH_VLAN_STRIP_MASK |
596 RTE_ETH_QINQ_STRIP_MASK |
597 RTE_ETH_VLAN_FILTER_MASK |
598 RTE_ETH_VLAN_EXTEND_MASK);
599 if (err) {
600 PMD_DRV_LOG(ERR, "Failed to update vlan offload");
601 return err;
602 }
603
604 err = iavf_dev_vlan_insert_set(dev);
605 if (err)
606 PMD_DRV_LOG(ERR, "Failed to update vlan insertion");
607
608 return err;
609 }
610
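/* dev_configure: choose between the default and "large VF" (more than
 * IAVF_MAX_NUM_QUEUES_DFLT queue pairs) modes, requesting extra queues from
 * the PF when needed, then apply the initial VLAN and RSS configuration.
 */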
611 static int
612 iavf_dev_configure(struct rte_eth_dev *dev)
613 {
614 struct iavf_adapter *ad =
615 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
616 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
617 uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
618 dev->data->nb_tx_queues);
619 int ret;
620
621 ad->rx_bulk_alloc_allowed = true;
622 /* Initialize to TRUE. If any of the Rx queues doesn't meet the
623 * vector Rx/Tx preconditions, it will be reset.
624 */
625 ad->rx_vec_allowed = true;
626 ad->tx_vec_allowed = true;
627
628 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
629 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
630
631 /* Large VF setting */
632 if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
633 if (!(vf->vf_res->vf_cap_flags &
634 VIRTCHNL_VF_LARGE_NUM_QPAIRS)) {
635 PMD_DRV_LOG(ERR, "large VF is not supported");
636 return -1;
637 }
638
639 if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_LV) {
640 PMD_DRV_LOG(ERR, "queue pairs number cannot be larger than %u",
641 IAVF_MAX_NUM_QUEUES_LV);
642 return -1;
643 }
644
645 ret = iavf_queues_req_reset(dev, num_queue_pairs);
646 if (ret)
647 return ret;
648
649 ret = iavf_get_max_rss_queue_region(ad);
650 if (ret) {
651 PMD_INIT_LOG(ERR, "get max rss queue region failed");
652 return ret;
653 }
654
655 vf->lv_enabled = true;
656 } else {
657 /* Check if large VF is already enabled. If so, disable it and
658 * release the redundant queue resources.
659 * Otherwise, check if there are enough queue pairs; if not, request them from PF.
660 */
661 if (vf->lv_enabled ||
662 num_queue_pairs > vf->vsi_res->num_queue_pairs) {
663 ret = iavf_queues_req_reset(dev, num_queue_pairs);
664 if (ret)
665 return ret;
666
667 vf->lv_enabled = false;
668 }
669 /* if large VF is not required, use default rss queue region */
670 vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
671 }
672
673 ret = iavf_dev_init_vlan(dev);
674 if (ret)
675 PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret);
676
677 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
678 if (iavf_init_rss(ad) != 0) {
679 PMD_DRV_LOG(ERR, "configure rss failed");
680 return -1;
681 }
682 }
683 return 0;
684 }
685
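/* Per-queue Rx init: validate the maximum packet length, register the Rx
 * timestamp dynfield when the offload is requested, enable scattered Rx if
 * a frame can exceed a single mbuf, and arm the Rx queue tail register.
 */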
686 static int
687 iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
688 {
689 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
690 struct rte_eth_dev_data *dev_data = dev->data;
691 uint16_t buf_size, max_pkt_len;
692 uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
693 enum iavf_status err;
694
695 buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
696
697 /* Calculate the maximum packet length allowed */
698 max_pkt_len = RTE_MIN((uint32_t)
699 rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
700 frame_size);
701
702 /* Check if maximum packet length is set correctly. */
703 if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
704 max_pkt_len > IAVF_FRAME_SIZE_MAX) {
705 PMD_DRV_LOG(ERR, "maximum packet length must be "
706 "larger than %u and smaller than %u",
707 (uint32_t)RTE_ETHER_MIN_LEN,
708 (uint32_t)IAVF_FRAME_SIZE_MAX);
709 return -EINVAL;
710 }
711
712 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
713 /* Register mbuf field and flag for Rx timestamp */
714 err = rte_mbuf_dyn_rx_timestamp_register(
715 &iavf_timestamp_dynfield_offset,
716 &iavf_timestamp_dynflag);
717 if (err) {
718 PMD_DRV_LOG(ERR,
719 "Cannot register mbuf field/flag for timestamp");
720 return -EINVAL;
721 }
722 }
723
724 rxq->max_pkt_len = max_pkt_len;
725 if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
726 rxq->max_pkt_len > buf_size) {
727 dev_data->scattered_rx = 1;
728 }
729 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
730 IAVF_WRITE_FLUSH(hw);
731
732 return 0;
733 }
734
735 static int
736 iavf_init_queues(struct rte_eth_dev *dev)
737 {
738 struct iavf_rx_queue **rxq =
739 (struct iavf_rx_queue **)dev->data->rx_queues;
740 int i, ret = IAVF_SUCCESS;
741
742 for (i = 0; i < dev->data->nb_rx_queues; i++) {
743 if (!rxq[i] || !rxq[i]->q_set)
744 continue;
745 ret = iavf_init_rxq(dev, rxq[i]);
746 if (ret != IAVF_SUCCESS)
747 break;
748 }
749 /* set rx/tx function to vector/scatter/single-segment
750 * according to parameters
751 */
752 iavf_set_rx_function(dev);
753 iavf_set_tx_function(dev);
754
755 return ret;
756 }
757
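/* Map Rx queues to MSI-X vectors. Three cases are handled: no Rx interrupts
 * (use WB_ON_ITR or the misc vector for descriptor write-back only), a single
 * shared vector, or multiple vectors starting at IAVF_RX_VEC_START. Large VFs
 * send the queue-vector map in IAVF_IRQ_MAP_NUM_PER_BUF sized chunks.
 */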
758 static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
759 struct rte_intr_handle *intr_handle)
760 {
761 struct iavf_adapter *adapter =
762 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
763 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
764 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
765 struct iavf_qv_map *qv_map;
766 uint16_t interval, i;
767 int vec;
768
769 if (rte_intr_cap_multiple(intr_handle) &&
770 dev->data->dev_conf.intr_conf.rxq) {
771 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
772 return -1;
773 }
774
775 if (rte_intr_dp_is_en(intr_handle)) {
776 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
777 dev->data->nb_rx_queues)) {
778 PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
779 dev->data->nb_rx_queues);
780 return -1;
781 }
782 }
783
784
785 qv_map = rte_zmalloc("qv_map",
786 dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
787 if (!qv_map) {
788 PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
789 dev->data->nb_rx_queues);
790 goto qv_map_alloc_err;
791 }
792
793 if (!dev->data->dev_conf.intr_conf.rxq ||
794 !rte_intr_dp_is_en(intr_handle)) {
795 /* Rx interrupt disabled, map interrupt only for writeback */
796 vf->nb_msix = 1;
797 if (vf->vf_res->vf_cap_flags &
798 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
799 /* If WB_ON_ITR is supported, enable it */
800 vf->msix_base = IAVF_RX_VEC_START;
801 /* Set the ITR for index zero, to 2us to make sure that
802 * we leave time for aggregation to occur, but don't
803 * increase latency dramatically.
804 */
805 IAVF_WRITE_REG(hw,
806 IAVF_VFINT_DYN_CTLN1(vf->msix_base - 1),
807 (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
808 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
809 (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
810 /* debug - check for success! the return value
811 * should be 2, offset is 0x2800
812 */
813 /* IAVF_READ_REG(hw, IAVF_VFINT_ITRN1(0, 0)); */
814 } else {
815 /* If the WB_ON_ITR offload flag is not supported, an
816 * interrupt is needed for descriptor write-back.
817 */
818 vf->msix_base = IAVF_MISC_VEC_ID;
819
820 /* set ITR to default */
821 interval = iavf_calc_itr_interval(
822 IAVF_QUEUE_ITR_INTERVAL_DEFAULT);
823 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
824 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
825 (IAVF_ITR_INDEX_DEFAULT <<
826 IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
827 (interval <<
828 IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
829 }
830 IAVF_WRITE_FLUSH(hw);
831 /* map all queues to the same interrupt */
832 for (i = 0; i < dev->data->nb_rx_queues; i++) {
833 qv_map[i].queue_id = i;
834 qv_map[i].vector_id = vf->msix_base;
835 }
836 vf->qv_map = qv_map;
837 } else {
838 if (!rte_intr_allow_others(intr_handle)) {
839 vf->nb_msix = 1;
840 vf->msix_base = IAVF_MISC_VEC_ID;
841 for (i = 0; i < dev->data->nb_rx_queues; i++) {
842 qv_map[i].queue_id = i;
843 qv_map[i].vector_id = vf->msix_base;
844 rte_intr_vec_list_index_set(intr_handle,
845 i, IAVF_MISC_VEC_ID);
846 }
847 vf->qv_map = qv_map;
848 PMD_DRV_LOG(DEBUG,
849 "vector %u are mapping to all Rx queues",
850 vf->msix_base);
851 } else {
852 /* If Rx interrupts are required and multiple interrupts can
853 * be used, then the vectors start from 1.
854 */
855 vf->nb_msix =
856 RTE_MIN(rte_intr_nb_efd_get(intr_handle),
857 (uint16_t)(vf->vf_res->max_vectors - 1));
858 vf->msix_base = IAVF_RX_VEC_START;
859 vec = IAVF_RX_VEC_START;
860 for (i = 0; i < dev->data->nb_rx_queues; i++) {
861 qv_map[i].queue_id = i;
862 qv_map[i].vector_id = vec;
863 rte_intr_vec_list_index_set(intr_handle,
864 i, vec++);
865 if (vec >= vf->nb_msix + IAVF_RX_VEC_START)
866 vec = IAVF_RX_VEC_START;
867 }
868 vf->qv_map = qv_map;
869 PMD_DRV_LOG(DEBUG,
870 "%u vectors are mapping to %u Rx queues",
871 vf->nb_msix, dev->data->nb_rx_queues);
872 }
873 }
874
875 if (!vf->lv_enabled) {
876 if (iavf_config_irq_map(adapter)) {
877 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
878 goto config_irq_map_err;
879 }
880 } else {
881 uint16_t num_qv_maps = dev->data->nb_rx_queues;
882 uint16_t index = 0;
883
884 while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
885 if (iavf_config_irq_map_lv(adapter,
886 IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
887 PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
888 goto config_irq_map_err;
889 }
890 num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
891 index += IAVF_IRQ_MAP_NUM_PER_BUF;
892 }
893
894 if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
895 PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
896 goto config_irq_map_err;
897 }
898 }
899 return 0;
900
901 config_irq_map_err:
902 rte_free(vf->qv_map);
903 vf->qv_map = NULL;
904
905 qv_map_alloc_err:
906 rte_intr_vec_list_free(intr_handle);
907
908 return -1;
909 }
910
911 static int
912 iavf_start_queues(struct rte_eth_dev *dev)
913 {
914 struct iavf_rx_queue *rxq;
915 struct iavf_tx_queue *txq;
916 int i;
917
918 for (i = 0; i < dev->data->nb_tx_queues; i++) {
919 txq = dev->data->tx_queues[i];
920 if (txq->tx_deferred_start)
921 continue;
922 if (iavf_dev_tx_queue_start(dev, i) != 0) {
923 PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
924 return -1;
925 }
926 }
927
928 for (i = 0; i < dev->data->nb_rx_queues; i++) {
929 rxq = dev->data->rx_queues[i];
930 if (rxq->rx_deferred_start)
931 continue;
932 if (iavf_dev_rx_queue_start(dev, i) != 0) {
933 PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
934 return -1;
935 }
936 }
937
938 return 0;
939 }
940
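/* dev_start: query the optional QoS and PTP capabilities, initialize and
 * configure all queues (splitting the virtchnl messages to stay within the
 * adminq buffer limit), set up Rx interrupts, program unicast and multicast
 * MAC filters, and finally enable the queues.
 */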
941 static int
942 iavf_dev_start(struct rte_eth_dev *dev)
943 {
944 struct iavf_adapter *adapter =
945 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
946 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
947 struct rte_intr_handle *intr_handle = dev->intr_handle;
948 uint16_t num_queue_pairs;
949 uint16_t index = 0;
950
951 PMD_INIT_FUNC_TRACE();
952
953 adapter->stopped = 0;
954
955 vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
956 vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
957 dev->data->nb_tx_queues);
958 num_queue_pairs = vf->num_queue_pairs;
959
960 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
961 if (iavf_get_qos_cap(adapter)) {
962 PMD_INIT_LOG(ERR, "Failed to get qos capability");
963 return -1;
964 }
965
966 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP) {
967 if (iavf_get_ptp_cap(adapter)) {
968 PMD_INIT_LOG(ERR, "Failed to get ptp capability");
969 return -1;
970 }
971 }
972
973 if (iavf_init_queues(dev) != 0) {
974 PMD_DRV_LOG(ERR, "failed to do Queue init");
975 return -1;
976 }
977
978 if (iavf_set_vf_quanta_size(adapter, index, num_queue_pairs) != 0)
979 PMD_DRV_LOG(WARNING, "configure quanta size failed");
980
981 /* If needed, send configure queues msg multiple times to make the
982 * adminq buffer length smaller than the 4K limitation.
983 */
984 while (num_queue_pairs > IAVF_CFG_Q_NUM_PER_BUF) {
985 if (iavf_configure_queues(adapter,
986 IAVF_CFG_Q_NUM_PER_BUF, index) != 0) {
987 PMD_DRV_LOG(ERR, "configure queues failed");
988 goto err_queue;
989 }
990 num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF;
991 index += IAVF_CFG_Q_NUM_PER_BUF;
992 }
993
994 if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0) {
995 PMD_DRV_LOG(ERR, "configure queues failed");
996 goto err_queue;
997 }
998
999 if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) {
1000 PMD_DRV_LOG(ERR, "configure irq failed");
1001 goto err_queue;
1002 }
1003 /* re-enable interrupts, because the efd assignment may have changed */
1004 if (dev->data->dev_conf.intr_conf.rxq != 0) {
1005 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1006 rte_intr_disable(intr_handle);
1007 rte_intr_enable(intr_handle);
1008 }
1009
1010 /* Set all mac addrs */
1011 iavf_add_del_all_mac_addr(adapter, true);
1012
1013 /* Set all multicast addresses */
1014 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
1015 true);
1016
1017 if (iavf_start_queues(dev) != 0) {
1018 PMD_DRV_LOG(ERR, "enable queues failed");
1019 goto err_mac;
1020 }
1021
1022 if (dev->data->dev_conf.rxmode.offloads &
1023 RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1024 if (iavf_get_phc_time(adapter)) {
1025 PMD_DRV_LOG(ERR, "get physical time failed");
1026 goto err_mac;
1027 }
1028 adapter->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1029 }
1030
1031 return 0;
1032
1033 err_mac:
1034 iavf_add_del_all_mac_addr(adapter, false);
1035 err_queue:
1036 return -1;
1037 }
1038
1039 static int
1040 iavf_dev_stop(struct rte_eth_dev *dev)
1041 {
1042 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1043 struct iavf_adapter *adapter =
1044 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1045 struct rte_intr_handle *intr_handle = dev->intr_handle;
1046
1047 PMD_INIT_FUNC_TRACE();
1048
1049 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
1050 dev->data->dev_conf.intr_conf.rxq != 0)
1051 rte_intr_disable(intr_handle);
1052
1053 if (adapter->stopped == 1)
1054 return 0;
1055
1056 iavf_stop_queues(dev);
1057
1058 /* Disable the interrupt for Rx */
1059 rte_intr_efd_disable(intr_handle);
1060 /* Rx interrupt vector mapping free */
1061 rte_intr_vec_list_free(intr_handle);
1062
1063 /* remove all mac addrs */
1064 iavf_add_del_all_mac_addr(adapter, false);
1065
1066 /* remove all multicast addresses */
1067 iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
1068 false);
1069
1070 /* free the iAVF security device context and all related resources */
1071 iavf_security_ctx_destroy(adapter);
1072
1073 adapter->stopped = 1;
1074 dev->data->dev_started = 0;
1075
1076 return 0;
1077 }
1078
1079 static int
1080 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1081 {
1082 struct iavf_adapter *adapter =
1083 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1084 struct iavf_info *vf = &adapter->vf;
1085
1086 dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
1087 dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
1088 dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
1089 dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
1090 dev_info->max_mtu = dev_info->max_rx_pktlen - IAVF_ETH_OVERHEAD;
1091 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1092 dev_info->hash_key_size = vf->vf_res->rss_key_size;
1093 dev_info->reta_size = vf->vf_res->rss_lut_size;
1094 dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
1095 dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
1096 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
1097 dev_info->rx_offload_capa =
1098 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
1099 RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
1100 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1101 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
1102 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
1103 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1104 RTE_ETH_RX_OFFLOAD_SCATTER |
1105 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
1106 RTE_ETH_RX_OFFLOAD_RSS_HASH;
1107
1108 dev_info->tx_offload_capa =
1109 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
1110 RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
1111 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1112 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1113 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
1114 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
1115 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1116 RTE_ETH_TX_OFFLOAD_TCP_TSO |
1117 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1118 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
1119 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
1120 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
1121 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
1122 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
1123
1124 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
1125 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
1126
1127 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
1128 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
1129
1130 if (iavf_ipsec_crypto_supported(adapter)) {
1131 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
1132 dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
1133 }
1134
1135 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1136 .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
1137 .rx_drop_en = 0,
1138 .offloads = 0,
1139 };
1140
1141 dev_info->default_txconf = (struct rte_eth_txconf) {
1142 .tx_free_thresh = IAVF_DEFAULT_TX_FREE_THRESH,
1143 .tx_rs_thresh = IAVF_DEFAULT_TX_RS_THRESH,
1144 .offloads = 0,
1145 };
1146
1147 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1148 .nb_max = IAVF_MAX_RING_DESC,
1149 .nb_min = IAVF_MIN_RING_DESC,
1150 .nb_align = IAVF_ALIGN_RING_DESC,
1151 };
1152
1153 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1154 .nb_max = IAVF_MAX_RING_DESC,
1155 .nb_min = IAVF_MIN_RING_DESC,
1156 .nb_align = IAVF_ALIGN_RING_DESC,
1157 };
1158
1159 return 0;
1160 }
1161
1162 static const uint32_t *
1163 iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1164 {
1165 static const uint32_t ptypes[] = {
1166 RTE_PTYPE_L2_ETHER,
1167 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1168 RTE_PTYPE_L4_FRAG,
1169 RTE_PTYPE_L4_ICMP,
1170 RTE_PTYPE_L4_NONFRAG,
1171 RTE_PTYPE_L4_SCTP,
1172 RTE_PTYPE_L4_TCP,
1173 RTE_PTYPE_L4_UDP,
1174 RTE_PTYPE_UNKNOWN
1175 };
1176 return ptypes;
1177 }
1178
1179 int
1180 iavf_dev_link_update(struct rte_eth_dev *dev,
1181 __rte_unused int wait_to_complete)
1182 {
1183 struct rte_eth_link new_link;
1184 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1185
1186 memset(&new_link, 0, sizeof(new_link));
1187
1188 /* Only read the link status info stored in the VF; the info is updated
1189 * whenever a LINK_CHANGE event is received from the PF over virtchnl.
1190 */
1191 switch (vf->link_speed) {
1192 case 10:
1193 new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
1194 break;
1195 case 100:
1196 new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
1197 break;
1198 case 1000:
1199 new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
1200 break;
1201 case 10000:
1202 new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
1203 break;
1204 case 20000:
1205 new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
1206 break;
1207 case 25000:
1208 new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
1209 break;
1210 case 40000:
1211 new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
1212 break;
1213 case 50000:
1214 new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
1215 break;
1216 case 100000:
1217 new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
1218 break;
1219 default:
1220 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1221 break;
1222 }
1223
1224 new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1225 new_link.link_status = vf->link_up ? RTE_ETH_LINK_UP :
1226 RTE_ETH_LINK_DOWN;
1227 new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1228 RTE_ETH_LINK_SPEED_FIXED);
1229
1230 return rte_eth_linkstatus_set(dev, &new_link);
1231 }
1232
1233 static int
1234 iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
1235 {
1236 struct iavf_adapter *adapter =
1237 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1238 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1239
1240 return iavf_config_promisc(adapter,
1241 true, vf->promisc_multicast_enabled);
1242 }
1243
1244 static int
1245 iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
1246 {
1247 struct iavf_adapter *adapter =
1248 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1249 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1250
1251 return iavf_config_promisc(adapter,
1252 false, vf->promisc_multicast_enabled);
1253 }
1254
1255 static int
1256 iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
1257 {
1258 struct iavf_adapter *adapter =
1259 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1260 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1261
1262 return iavf_config_promisc(adapter,
1263 vf->promisc_unicast_enabled, true);
1264 }
1265
1266 static int
1267 iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
1268 {
1269 struct iavf_adapter *adapter =
1270 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1271 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1272
1273 return iavf_config_promisc(adapter,
1274 vf->promisc_unicast_enabled, false);
1275 }
1276
1277 static int
1278 iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
1279 __rte_unused uint32_t index,
1280 __rte_unused uint32_t pool)
1281 {
1282 struct iavf_adapter *adapter =
1283 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1284 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1285 int err;
1286
1287 if (rte_is_zero_ether_addr(addr)) {
1288 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1289 return -EINVAL;
1290 }
1291
1292 err = iavf_add_del_eth_addr(adapter, addr, true, VIRTCHNL_ETHER_ADDR_EXTRA);
1293 if (err) {
1294 PMD_DRV_LOG(ERR, "fail to add MAC address");
1295 return -EIO;
1296 }
1297
1298 vf->mac_num++;
1299
1300 return 0;
1301 }
1302
1303 static void
1304 iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1305 {
1306 struct iavf_adapter *adapter =
1307 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1308 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1309 struct rte_ether_addr *addr;
1310 int err;
1311
1312 addr = &dev->data->mac_addrs[index];
1313
1314 err = iavf_add_del_eth_addr(adapter, addr, false, VIRTCHNL_ETHER_ADDR_EXTRA);
1315 if (err)
1316 PMD_DRV_LOG(ERR, "fail to delete MAC address");
1317
1318 vf->mac_num--;
1319 }
1320
1321 static int
1322 iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1323 {
1324 struct iavf_adapter *adapter =
1325 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1326 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1327 int err;
1328
1329 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
1330 err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
1331 if (err)
1332 return -EIO;
1333 return 0;
1334 }
1335
1336 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1337 return -ENOTSUP;
1338
1339 err = iavf_add_del_vlan(adapter, vlan_id, on);
1340 if (err)
1341 return -EIO;
1342 return 0;
1343 }
1344
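/* Walk the VLAN filter bitmap kept in dev->data->vlan_filter_conf and
 * re-program every configured VLAN ID with the given enable state.
 */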
1345 static void
1346 iavf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)
1347 {
1348 struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf;
1349 struct iavf_adapter *adapter =
1350 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1351 uint32_t i, j;
1352 uint64_t ids;
1353
1354 for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1355 if (vfc->ids[i] == 0)
1356 continue;
1357
1358 ids = vfc->ids[i];
1359 for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) {
1360 if (ids & 1)
1361 iavf_add_del_vlan_v2(adapter,
1362 64 * i + j, enable);
1363 }
1364 }
1365 }
1366
1367 static int
1368 iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
1369 {
1370 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1371 struct iavf_adapter *adapter =
1372 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1373 bool enable;
1374 int err;
1375
1376 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1377 enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
1378
1379 iavf_iterate_vlan_filters_v2(dev, enable);
1380 }
1381
1382 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1383 enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
1384
1385 err = iavf_config_vlan_strip_v2(adapter, enable);
1386 /* If not supported, stripping is already disabled by the PF */
1387 if (err == -ENOTSUP && !enable)
1388 err = 0;
1389 if (err)
1390 return -EIO;
1391 }
1392
1393 return 0;
1394 }
1395
1396 static int
1397 iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1398 {
1399 struct iavf_adapter *adapter =
1400 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1401 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1402 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1403 int err;
1404
1405 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
1406 return iavf_dev_vlan_offload_set_v2(dev, mask);
1407
1408 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1409 return -ENOTSUP;
1410
1411 /* Vlan stripping setting */
1412 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1413 /* Enable or disable VLAN stripping */
1414 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1415 err = iavf_enable_vlan_strip(adapter);
1416 else
1417 err = iavf_disable_vlan_strip(adapter);
1418
1419 if (err)
1420 return -EIO;
1421 }
1422 return 0;
1423 }
1424
1425 static int
1426 iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
1427 struct rte_eth_rss_reta_entry64 *reta_conf,
1428 uint16_t reta_size)
1429 {
1430 struct iavf_adapter *adapter =
1431 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1432 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1433 uint8_t *lut;
1434 uint16_t i, idx, shift;
1435 int ret;
1436
1437 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1438 return -ENOTSUP;
1439
1440 if (reta_size != vf->vf_res->rss_lut_size) {
1441 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1442 "(%d) doesn't match the number of hardware can "
1443 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1444 return -EINVAL;
1445 }
1446
1447 lut = rte_zmalloc("rss_lut", reta_size, 0);
1448 if (!lut) {
1449 PMD_DRV_LOG(ERR, "No memory can be allocated");
1450 return -ENOMEM;
1451 }
1452 /* store the old lut table temporarily */
1453 rte_memcpy(lut, vf->rss_lut, reta_size);
1454
1455 for (i = 0; i < reta_size; i++) {
1456 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1457 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1458 if (reta_conf[idx].mask & (1ULL << shift))
1459 lut[i] = reta_conf[idx].reta[shift];
1460 }
1461
1462 rte_memcpy(vf->rss_lut, lut, reta_size);
1463 /* send virtchnl ops to configure RSS */
1464 ret = iavf_configure_rss_lut(adapter);
1465 if (ret) /* revert back */
1466 rte_memcpy(vf->rss_lut, lut, reta_size);
1467 rte_free(lut);
1468
1469 return ret;
1470 }
1471
1472 static int
1473 iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
1474 struct rte_eth_rss_reta_entry64 *reta_conf,
1475 uint16_t reta_size)
1476 {
1477 struct iavf_adapter *adapter =
1478 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1479 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1480 uint16_t i, idx, shift;
1481
1482 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1483 return -ENOTSUP;
1484
1485 if (reta_size != vf->vf_res->rss_lut_size) {
1486 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1487 "(%d) doesn't match the number of hardware can "
1488 "support (%d)", reta_size, vf->vf_res->rss_lut_size);
1489 return -EINVAL;
1490 }
1491
1492 for (i = 0; i < reta_size; i++) {
1493 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1494 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1495 if (reta_conf[idx].mask & (1ULL << shift))
1496 reta_conf[idx].reta[shift] = vf->rss_lut[i];
1497 }
1498
1499 return 0;
1500 }
1501
1502 static int
1503 iavf_set_rss_key(struct iavf_adapter *adapter, uint8_t *key, uint8_t key_len)
1504 {
1505 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1506
1507 /* HENA setting, it is enabled by default, no change */
1508 if (!key || key_len == 0) {
1509 PMD_DRV_LOG(DEBUG, "No key to be configured");
1510 return 0;
1511 } else if (key_len != vf->vf_res->rss_key_size) {
1512 PMD_DRV_LOG(ERR, "The size of hash key configured "
1513 "(%d) doesn't match the size of hardware can "
1514 "support (%d)", key_len,
1515 vf->vf_res->rss_key_size);
1516 return -EINVAL;
1517 }
1518
1519 rte_memcpy(vf->rss_key, key, key_len);
1520
1521 return iavf_configure_rss_key(adapter);
1522 }
1523
1524 static int
1525 iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
1526 struct rte_eth_rss_conf *rss_conf)
1527 {
1528 struct iavf_adapter *adapter =
1529 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1530 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1531 int ret;
1532
1533 adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
1534
1535 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1536 return -ENOTSUP;
1537
1538 /* Set hash key. */
1539 ret = iavf_set_rss_key(adapter, rss_conf->rss_key,
1540 rss_conf->rss_key_len);
1541 if (ret)
1542 return ret;
1543
1544 if (rss_conf->rss_hf == 0) {
1545 vf->rss_hf = 0;
1546 ret = iavf_set_hena(adapter, 0);
1547
1548 /* It is a workaround, temporarily allow error to be returned
1549 * due to possible lack of PF handling for hena = 0.
1550 */
1551 if (ret)
1552 PMD_DRV_LOG(WARNING, "fail to clean existing RSS, lack PF support");
1553 return 0;
1554 }
1555
1556 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
1557 /* Clear existing RSS. */
1558 ret = iavf_set_hena(adapter, 0);
1559
1560 /* It is a workaround, temporarily allow error to be returned
1561 * due to possible lack of PF handling for hena = 0.
1562 */
1563 if (ret)
1564 PMD_DRV_LOG(WARNING, "fail to clean existing RSS,"
1565 "lack PF support");
1566
1567 /* Set new RSS configuration. */
1568 ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
1569 if (ret) {
1570 PMD_DRV_LOG(ERR, "fail to set new RSS");
1571 return ret;
1572 }
1573 } else {
1574 iavf_config_rss_hf(adapter, rss_conf->rss_hf);
1575 }
1576
1577 return 0;
1578 }
1579
1580 static int
1581 iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1582 struct rte_eth_rss_conf *rss_conf)
1583 {
1584 struct iavf_adapter *adapter =
1585 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1586 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1587
1588 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1589 return -ENOTSUP;
1590
1591 rss_conf->rss_hf = vf->rss_hf;
1592
1593 if (!rss_conf->rss_key)
1594 return 0;
1595
1596 rss_conf->rss_key_len = vf->vf_res->rss_key_size;
1597 rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);
1598
1599 return 0;
1600 }
1601
1602 static int
1603 iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
1604 {
1605 /* MTU setting is forbidden while the port is started */
1606 if (dev->data->dev_started) {
1607 PMD_DRV_LOG(ERR, "port must be stopped before configuration");
1608 return -EBUSY;
1609 }
1610
1611 return 0;
1612 }
1613
1614 static int
1615 iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
1616 struct rte_ether_addr *mac_addr)
1617 {
1618 struct iavf_adapter *adapter =
1619 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1620 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1621 struct rte_ether_addr *old_addr;
1622 int ret;
1623
1624 old_addr = (struct rte_ether_addr *)hw->mac.addr;
1625
1626 if (rte_is_same_ether_addr(old_addr, mac_addr))
1627 return 0;
1628
1629 ret = iavf_add_del_eth_addr(adapter, old_addr, false, VIRTCHNL_ETHER_ADDR_PRIMARY);
1630 if (ret)
1631 PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
1632 RTE_ETHER_ADDR_PRT_FMT,
1633 RTE_ETHER_ADDR_BYTES(old_addr));
1634
1635 ret = iavf_add_del_eth_addr(adapter, mac_addr, true, VIRTCHNL_ETHER_ADDR_PRIMARY);
1636 if (ret)
1637 PMD_DRV_LOG(ERR, "Fail to add new MAC:"
1638 RTE_ETHER_ADDR_PRT_FMT,
1639 RTE_ETHER_ADDR_BYTES(mac_addr));
1640
1641 if (ret)
1642 return -EIO;
1643
1644 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)hw->mac.addr);
1645 return 0;
1646 }
1647
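/* The counters reported by the PF are free-running; compute the delta
 * against the stored offset, handling wrap-around of the 48-bit (and, in
 * the 32-bit helper below, 32-bit) hardware counters.
 */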
1648 static void
1649 iavf_stat_update_48(uint64_t *offset, uint64_t *stat)
1650 {
1651 if (*stat >= *offset)
1652 *stat = *stat - *offset;
1653 else
1654 *stat = (uint64_t)((*stat +
1655 ((uint64_t)1 << IAVF_48_BIT_WIDTH)) - *offset);
1656
1657 *stat &= IAVF_48_BIT_MASK;
1658 }
1659
1660 static void
1661 iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
1662 {
1663 if (*stat >= *offset)
1664 *stat = (uint64_t)(*stat - *offset);
1665 else
1666 *stat = (uint64_t)((*stat +
1667 ((uint64_t)1 << IAVF_32_BIT_WIDTH)) - *offset);
1668 }
1669
1670 static void
1671 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
1672 {
1673 struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
1674
1675 iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
1676 iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
1677 iavf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
1678 iavf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
1679 iavf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
1680 iavf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
1681 iavf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
1682 iavf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
1683 iavf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
1684 iavf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
1685 iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
1686 }
1687
1688 static int
1689 iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1690 {
1691 struct iavf_adapter *adapter =
1692 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1693 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1694 struct iavf_vsi *vsi = &vf->vsi;
1695 struct virtchnl_eth_stats *pstats = NULL;
1696 int ret;
1697
1698 ret = iavf_query_stats(adapter, &pstats);
1699 if (ret == 0) {
1700 uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
1701 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
1702 RTE_ETHER_CRC_LEN;
1703 iavf_update_stats(vsi, pstats);
1704 stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
1705 pstats->rx_broadcast - pstats->rx_discards;
1706 stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
1707 pstats->tx_unicast;
1708 stats->imissed = pstats->rx_discards;
1709 stats->oerrors = pstats->tx_errors + pstats->tx_discards;
1710 stats->ibytes = pstats->rx_bytes;
1711 stats->ibytes -= stats->ipackets * crc_stats_len;
1712 stats->obytes = pstats->tx_bytes;
1713 } else {
1714 PMD_DRV_LOG(ERR, "Get statistics failed");
1715 }
1716 return ret;
1717 }
1718
1719 static int
1720 iavf_dev_stats_reset(struct rte_eth_dev *dev)
1721 {
1722 int ret;
1723 struct iavf_adapter *adapter =
1724 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1725 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1726 struct iavf_vsi *vsi = &vf->vsi;
1727 struct virtchnl_eth_stats *pstats = NULL;
1728
1729 	/* read current stat values; they become the new offset base */
1730 ret = iavf_query_stats(adapter, &pstats);
1731 if (ret != 0)
1732 return ret;
1733
1734 	/* set stats offset based on current values */
1735 vsi->eth_stats_offset.eth_stats = *pstats;
1736
1737 return 0;
1738 }
1739
1740 static int
1741 iavf_dev_xstats_reset(struct rte_eth_dev *dev)
1742 {
1743 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1744 iavf_dev_stats_reset(dev);
1745 memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
1746 sizeof(struct iavf_ipsec_crypto_stats));
1747 return 0;
1748 }
1749
1750 static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1751 struct rte_eth_xstat_name *xstats_names,
1752 __rte_unused unsigned int limit)
1753 {
1754 unsigned int i;
1755
1756 if (xstats_names != NULL)
1757 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1758 snprintf(xstats_names[i].name,
1759 sizeof(xstats_names[i].name),
1760 "%s", rte_iavf_stats_strings[i].name);
1761 }
1762 return IAVF_NB_XSTATS;
1763 }
1764
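/*
 * Accumulate the per-Rx-queue inline IPsec crypto counters into a single
 * iavf_ipsec_crypto_stats snapshot; the result feeds the xstats array built
 * in iavf_dev_xstats_get() below.
 */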
1765 static void
1766 iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
1767 struct iavf_ipsec_crypto_stats *ips)
1768 {
1769 uint16_t idx;
1770 for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
1771 struct iavf_rx_queue *rxq;
1772 struct iavf_ipsec_crypto_stats *stats;
1773 rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
1774 stats = &rxq->stats.ipsec_crypto;
1775 ips->icount += stats->icount;
1776 ips->ibytes += stats->ibytes;
1777 ips->ierrors.count += stats->ierrors.count;
1778 ips->ierrors.sad_miss += stats->ierrors.sad_miss;
1779 ips->ierrors.not_processed += stats->ierrors.not_processed;
1780 ips->ierrors.icv_check += stats->ierrors.icv_check;
1781 ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
1782 ips->ierrors.misc += stats->ierrors.misc;
1783 }
1784 }
1785
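/*
 * Standard ethdev xstats semantics: when the caller's array is too small the
 * required count (IAVF_NB_XSTATS) is returned so the application can resize
 * and retry; otherwise each entry is filled by offset from
 * rte_iavf_stats_strings into the combined eth/IPsec stats structure.
 */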
1786 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
1787 struct rte_eth_xstat *xstats, unsigned int n)
1788 {
1789 int ret;
1790 unsigned int i;
1791 struct iavf_adapter *adapter =
1792 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1793 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1794 struct iavf_vsi *vsi = &vf->vsi;
1795 struct virtchnl_eth_stats *pstats = NULL;
1796 struct iavf_eth_xstats iavf_xtats = {{0}};
1797
1798 if (n < IAVF_NB_XSTATS)
1799 return IAVF_NB_XSTATS;
1800
1801 ret = iavf_query_stats(adapter, &pstats);
1802 if (ret != 0)
1803 return 0;
1804
1805 if (!xstats)
1806 return 0;
1807
1808 iavf_update_stats(vsi, pstats);
1809 iavf_xtats.eth_stats = *pstats;
1810
1811 if (iavf_ipsec_crypto_supported(adapter))
1812 iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
1813
1814 /* loop over xstats array and values from pstats */
1815 for (i = 0; i < IAVF_NB_XSTATS; i++) {
1816 xstats[i].id = i;
1817 xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
1818 rte_iavf_stats_strings[i].offset);
1819 }
1820
1821 return IAVF_NB_XSTATS;
1822 }
1823
1824
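/*
 * Rx queue interrupt control.  Vector IAVF_MISC_VEC_ID is shared with the
 * admin queue (control path), so it is driven through IAVF_VFINT_DYN_CTL01;
 * all other queue vectors use the IAVF_VFINT_DYN_CTLN1 register bank.
 */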
1825 static int
1826 iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1827 {
1828 struct iavf_adapter *adapter =
1829 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1830 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1831 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
1832 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1833 uint16_t msix_intr;
1834
1835 msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
1836 queue_id);
1837 if (msix_intr == IAVF_MISC_VEC_ID) {
1838 PMD_DRV_LOG(INFO, "MISC is also enabled for control");
1839 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
1840 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
1841 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1842 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
1843 } else {
1844 IAVF_WRITE_REG(hw,
1845 IAVF_VFINT_DYN_CTLN1
1846 (msix_intr - IAVF_RX_VEC_START),
1847 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1848 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
1849 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1850 }
1851
1852 IAVF_WRITE_FLUSH(hw);
1853
1854 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1855 rte_intr_ack(pci_dev->intr_handle);
1856
1857 return 0;
1858 }
1859
1860 static int
1861 iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1862 {
1863 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1864 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1865 uint16_t msix_intr;
1866
1867 msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
1868 queue_id);
1869 if (msix_intr == IAVF_MISC_VEC_ID) {
1870 PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
1871 return -EIO;
1872 }
1873
1874 IAVF_WRITE_REG(hw,
1875 IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
1876 0);
1877
1878 IAVF_WRITE_FLUSH(hw);
1879 return 0;
1880 }
1881
1882 static int
1883 iavf_check_vf_reset_done(struct iavf_hw *hw)
1884 {
1885 int i, reset;
1886
1887 for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
1888 reset = IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
1889 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1890 reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
1891 if (reset == VIRTCHNL_VFR_VFACTIVE ||
1892 reset == VIRTCHNL_VFR_COMPLETED)
1893 break;
1894 rte_delay_ms(20);
1895 }
1896
1897 if (i >= IAVF_RESET_WAIT_CNT)
1898 return -1;
1899
1900 return 0;
1901 }
1902
1903 static int
1904 iavf_lookup_proto_xtr_type(const char *flex_name)
1905 {
1906 static struct {
1907 const char *name;
1908 enum iavf_proto_xtr_type type;
1909 } xtr_type_map[] = {
1910 { "vlan", IAVF_PROTO_XTR_VLAN },
1911 { "ipv4", IAVF_PROTO_XTR_IPV4 },
1912 { "ipv6", IAVF_PROTO_XTR_IPV6 },
1913 { "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
1914 { "tcp", IAVF_PROTO_XTR_TCP },
1915 { "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
1916 { "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
1917 };
1918 uint32_t i;
1919
1920 for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
1921 if (strcmp(flex_name, xtr_type_map[i].name) == 0)
1922 return xtr_type_map[i].type;
1923 }
1924
1925 PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
1926 "vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
1927
1928 return -1;
1929 }
1930
1931 /**
1932 * Parse elem, the elem could be single number/range or '(' ')' group
1933 * 1) A single number elem, it's just a simple digit. e.g. 9
1934 * 2) A single range elem, two digits with a '-' between. e.g. 2-6
1935 * 3) A group elem, combines multiple 1) or 2) with '( )'. e.g (0,2-4,6)
1936 * Within group elem, '-' used for a range separator;
1937 * ',' used for a single number.
1938 */
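/*
 * Putting the pieces together, the proto_xtr devargs value handled by
 * iavf_parse_queue_proto_xtr() and iavf_parse_queue_set() looks, for example,
 * like one of the following (illustrative, not exhaustive):
 *   proto_xtr=vlan                      - default type for every queue
 *   proto_xtr=[2:ipv4]                  - queue 2 only
 *   proto_xtr=[0-3:tcp]                 - queue range 0..3
 *   proto_xtr=[(0,2-4,6):ipv6,8:vlan]   - grouped set plus a single queue
 */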
1939 static int
1940 iavf_parse_queue_set(const char *input, int xtr_type,
1941 struct iavf_devargs *devargs)
1942 {
1943 const char *str = input;
1944 char *end = NULL;
1945 uint32_t min, max;
1946 uint32_t idx;
1947
1948 while (isblank(*str))
1949 str++;
1950
1951 if (!isdigit(*str) && *str != '(')
1952 return -1;
1953
1954 /* process single number or single range of number */
1955 if (*str != '(') {
1956 errno = 0;
1957 idx = strtoul(str, &end, 10);
1958 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1959 return -1;
1960
1961 while (isblank(*end))
1962 end++;
1963
1964 min = idx;
1965 max = idx;
1966
1967 /* process single <number>-<number> */
1968 if (*end == '-') {
1969 end++;
1970 while (isblank(*end))
1971 end++;
1972 if (!isdigit(*end))
1973 return -1;
1974
1975 errno = 0;
1976 idx = strtoul(end, &end, 10);
1977 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
1978 return -1;
1979
1980 max = idx;
1981 while (isblank(*end))
1982 end++;
1983 }
1984
1985 if (*end != ':')
1986 return -1;
1987
1988 for (idx = RTE_MIN(min, max);
1989 idx <= RTE_MAX(min, max); idx++)
1990 devargs->proto_xtr[idx] = xtr_type;
1991
1992 return 0;
1993 }
1994
1995 /* process set within bracket */
1996 str++;
1997 while (isblank(*str))
1998 str++;
1999 if (*str == '\0')
2000 return -1;
2001
2002 min = IAVF_MAX_QUEUE_NUM;
2003 do {
2004 /* go ahead to the first digit */
2005 while (isblank(*str))
2006 str++;
2007 if (!isdigit(*str))
2008 return -1;
2009
2010 /* get the digit value */
2011 errno = 0;
2012 idx = strtoul(str, &end, 10);
2013 if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
2014 return -1;
2015
2016 /* go ahead to separator '-',',' and ')' */
2017 while (isblank(*end))
2018 end++;
2019 if (*end == '-') {
2020 if (min == IAVF_MAX_QUEUE_NUM)
2021 min = idx;
2022 else /* avoid continuous '-' */
2023 return -1;
2024 } else if (*end == ',' || *end == ')') {
2025 max = idx;
2026 if (min == IAVF_MAX_QUEUE_NUM)
2027 min = idx;
2028
2029 for (idx = RTE_MIN(min, max);
2030 idx <= RTE_MAX(min, max); idx++)
2031 devargs->proto_xtr[idx] = xtr_type;
2032
2033 min = IAVF_MAX_QUEUE_NUM;
2034 } else {
2035 return -1;
2036 }
2037
2038 str = end + 1;
2039 } while (*end != ')' && *end != '\0');
2040
2041 return 0;
2042 }
2043
2044 static int
2045 iavf_parse_queue_proto_xtr(const char *queues, struct iavf_devargs *devargs)
2046 {
2047 const char *queue_start;
2048 uint32_t idx;
2049 int xtr_type;
2050 char flex_name[32];
2051
2052 while (isblank(*queues))
2053 queues++;
2054
2055 if (*queues != '[') {
2056 xtr_type = iavf_lookup_proto_xtr_type(queues);
2057 if (xtr_type < 0)
2058 return -1;
2059
2060 devargs->proto_xtr_dflt = xtr_type;
2061
2062 return 0;
2063 }
2064
2065 queues++;
2066 do {
2067 while (isblank(*queues))
2068 queues++;
2069 if (*queues == '\0')
2070 return -1;
2071
2072 queue_start = queues;
2073
2074 /* go across a complete bracket */
2075 if (*queue_start == '(') {
2076 queues += strcspn(queues, ")");
2077 if (*queues != ')')
2078 return -1;
2079 }
2080
2081 /* scan the separator ':' */
2082 queues += strcspn(queues, ":");
2083 if (*queues++ != ':')
2084 return -1;
2085 while (isblank(*queues))
2086 queues++;
2087
2088 for (idx = 0; ; idx++) {
2089 if (isblank(queues[idx]) ||
2090 queues[idx] == ',' ||
2091 queues[idx] == ']' ||
2092 queues[idx] == '\0')
2093 break;
2094
2095 if (idx > sizeof(flex_name) - 2)
2096 return -1;
2097
2098 flex_name[idx] = queues[idx];
2099 }
2100 flex_name[idx] = '\0';
2101 xtr_type = iavf_lookup_proto_xtr_type(flex_name);
2102 if (xtr_type < 0)
2103 return -1;
2104
2105 queues += idx;
2106
2107 while (isblank(*queues) || *queues == ',' || *queues == ']')
2108 queues++;
2109
2110 if (iavf_parse_queue_set(queue_start, xtr_type, devargs) < 0)
2111 return -1;
2112 } while (*queues != '\0');
2113
2114 return 0;
2115 }
2116
2117 static int
2118 iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
2119 void *extra_args)
2120 {
2121 struct iavf_devargs *devargs = extra_args;
2122
2123 if (!value || !extra_args)
2124 return -EINVAL;
2125
2126 if (iavf_parse_queue_proto_xtr(value, devargs) < 0) {
2127 PMD_DRV_LOG(ERR, "invalid proto_xtr parameter: '%s'",
2128 value);
2129 return -1;
2130 }
2131
2132 return 0;
2133 }
2134
2135 static int
2136 parse_u16(__rte_unused const char *key, const char *value, void *args)
2137 {
2138 u16 *num = (u16 *)args;
2139 u16 tmp;
2140
2141 errno = 0;
2142 tmp = strtoull(value, NULL, 10);
2143 if (errno || !tmp) {
2144 PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u16",
2145 key, value);
2146 return -1;
2147 }
2148
2149 *num = tmp;
2150
2151 return 0;
2152 }
2153
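/*
 * Devargs parsing entry point.  A hedged example of how both keys might be
 * passed on the EAL command line (the PCI address is a placeholder):
 *
 *   dpdk-testpmd -a 18:01.0,proto_xtr=[0-3:tcp],quanta_size=1024 -- -i
 *
 * quanta_size defaults to 1024 when unset and is validated against the
 * 256..4096 range below.
 */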
2154 static int iavf_parse_devargs(struct rte_eth_dev *dev)
2155 {
2156 struct iavf_adapter *ad =
2157 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2158 struct rte_devargs *devargs = dev->device->devargs;
2159 struct rte_kvargs *kvlist;
2160 int ret;
2161
2162 if (!devargs)
2163 return 0;
2164
2165 kvlist = rte_kvargs_parse(devargs->args, iavf_valid_args);
2166 if (!kvlist) {
2167 PMD_INIT_LOG(ERR, "invalid kvargs key");
2168 return -EINVAL;
2169 }
2170
2171 ad->devargs.proto_xtr_dflt = IAVF_PROTO_XTR_NONE;
2172 memset(ad->devargs.proto_xtr, IAVF_PROTO_XTR_NONE,
2173 sizeof(ad->devargs.proto_xtr));
2174
2175 ret = rte_kvargs_process(kvlist, IAVF_PROTO_XTR_ARG,
2176 &iavf_handle_proto_xtr_arg, &ad->devargs);
2177 if (ret)
2178 goto bail;
2179
2180 ret = rte_kvargs_process(kvlist, IAVF_QUANTA_SIZE_ARG,
2181 &parse_u16, &ad->devargs.quanta_size);
2182 if (ret)
2183 goto bail;
2184
2185 if (ad->devargs.quanta_size == 0)
2186 ad->devargs.quanta_size = 1024;
2187
2188 if (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
2189 ad->devargs.quanta_size & 0x40) {
2190 PMD_INIT_LOG(ERR, "invalid quanta size");
2191 ret = -EINVAL;
goto bail;
2192 }
2193
2194 bail:
2195 rte_kvargs_free(kvlist);
2196 return ret;
2197 }
2198
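/*
 * Register the protocol extraction mbuf dynfield and dynflags when any queue
 * requests proto_xtr.  A minimal, illustrative sketch of how an application
 * could read the extracted metadata afterwards (assuming the registrations
 * below succeeded):
 *
 *   if (rte_pmd_ifd_dynfield_proto_xtr_metadata_offs != -1 &&
 *       (mb->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_vlan_mask)) {
 *       uint32_t meta = *RTE_MBUF_DYNFIELD(mb,
 *               rte_pmd_ifd_dynfield_proto_xtr_metadata_offs, uint32_t *);
 *       ...
 *   }
 */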
2199 static void
2200 iavf_init_proto_xtr(struct rte_eth_dev *dev)
2201 {
2202 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2203 struct iavf_adapter *ad =
2204 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2205 const struct iavf_proto_xtr_ol *xtr_ol;
2206 bool proto_xtr_enable = false;
2207 int offset;
2208 uint16_t i;
2209
2210 vf->proto_xtr = rte_zmalloc("vf proto xtr",
2211 vf->vsi_res->num_queue_pairs, 0);
2212 if (unlikely(!(vf->proto_xtr))) {
2213 PMD_DRV_LOG(ERR, "failed to allocate memory for the proto_xtr table");
2214 return;
2215 }
2216
2217 for (i = 0; i < vf->vsi_res->num_queue_pairs; i++) {
2218 vf->proto_xtr[i] = ad->devargs.proto_xtr[i] !=
2219 IAVF_PROTO_XTR_NONE ?
2220 ad->devargs.proto_xtr[i] :
2221 ad->devargs.proto_xtr_dflt;
2222
2223 if (vf->proto_xtr[i] != IAVF_PROTO_XTR_NONE) {
2224 uint8_t type = vf->proto_xtr[i];
2225
2226 iavf_proto_xtr_params[type].required = true;
2227 proto_xtr_enable = true;
2228 }
2229 }
2230
2231 if (likely(!proto_xtr_enable))
2232 return;
2233
2234 offset = rte_mbuf_dynfield_register(&iavf_proto_xtr_metadata_param);
2235 if (unlikely(offset == -1)) {
2236 PMD_DRV_LOG(ERR,
2237 "failed to register proto_xtr metadata dynfield, error %d",
2238 -rte_errno);
2239 return;
2240 }
2241
2242 PMD_DRV_LOG(DEBUG,
2243 "proto_xtr metadata offset in mbuf is : %d",
2244 offset);
2245 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = offset;
2246
2247 for (i = 0; i < RTE_DIM(iavf_proto_xtr_params); i++) {
2248 xtr_ol = &iavf_proto_xtr_params[i];
2249
2250 uint8_t rxdid = iavf_proto_xtr_type_to_rxdid((uint8_t)i);
2251
2252 if (!xtr_ol->required)
2253 continue;
2254
2255 if (!(vf->supported_rxdid & BIT(rxdid))) {
2256 PMD_DRV_LOG(ERR,
2257 "rxdid[%u] is not supported in hardware",
2258 rxdid);
2259 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
2260 break;
2261 }
2262
2263 offset = rte_mbuf_dynflag_register(&xtr_ol->param);
2264 if (unlikely(offset == -1)) {
2265 PMD_DRV_LOG(ERR,
2266 "failed to register proto_xtr offload '%s', error %d",
2267 xtr_ol->param.name, -rte_errno);
2268
2269 rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
2270 break;
2271 }
2272
2273 PMD_DRV_LOG(DEBUG,
2274 "proto_xtr offload '%s' offset in mbuf is : %d",
2275 xtr_ol->param.name, offset);
2276 *xtr_ol->ol_flag = 1ULL << offset;
2277 }
2278 }
2279
2280 static int
2281 iavf_init_vf(struct rte_eth_dev *dev)
2282 {
2283 int err, bufsz;
2284 struct iavf_adapter *adapter =
2285 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2286 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2287 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2288
2289 vf->eth_dev = dev;
2290
2291 err = iavf_parse_devargs(dev);
2292 if (err) {
2293 PMD_INIT_LOG(ERR, "Failed to parse devargs");
2294 goto err;
2295 }
2296
2297 err = iavf_set_mac_type(hw);
2298 if (err) {
2299 PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
2300 goto err;
2301 }
2302
2303 err = iavf_check_vf_reset_done(hw);
2304 if (err) {
2305 PMD_INIT_LOG(ERR, "VF is still resetting");
2306 goto err;
2307 }
2308
2309 iavf_init_adminq_parameter(hw);
2310 err = iavf_init_adminq(hw);
2311 if (err) {
2312 PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
2313 goto err;
2314 }
2315
2316 vf->aq_resp = rte_zmalloc("vf_aq_resp", IAVF_AQ_BUF_SZ, 0);
2317 if (!vf->aq_resp) {
2318 PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
2319 goto err_aq;
2320 }
2321 if (iavf_check_api_version(adapter) != 0) {
2322 PMD_INIT_LOG(ERR, "check_api version failed");
2323 goto err_api;
2324 }
2325
2326 bufsz = sizeof(struct virtchnl_vf_resource) +
2327 (IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
2328 vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
2329 if (!vf->vf_res) {
2330 PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
2331 goto err_api;
2332 }
2333
2334 if (iavf_get_vf_resource(adapter) != 0) {
2335 PMD_INIT_LOG(ERR, "iavf_get_vf_resource failed");
2336 goto err_alloc;
2337 }
2338 /* Allocate memory for RSS info */
2339 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2340 vf->rss_key = rte_zmalloc("rss_key",
2341 vf->vf_res->rss_key_size, 0);
2342 if (!vf->rss_key) {
2343 PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
2344 goto err_rss;
2345 }
2346 vf->rss_lut = rte_zmalloc("rss_lut",
2347 vf->vf_res->rss_lut_size, 0);
2348 if (!vf->rss_lut) {
2349 PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
2350 goto err_rss;
2351 }
2352 }
2353
2354 if (vf->vsi_res->num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT)
2355 vf->lv_enabled = true;
2356
2357 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2358 if (iavf_get_supported_rxdid(adapter) != 0) {
2359 PMD_INIT_LOG(ERR, "failed to get supported rxdid");
2360 goto err_rss;
2361 }
2362 }
2363
2364 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
2365 if (iavf_get_vlan_offload_caps_v2(adapter) != 0) {
2366 PMD_INIT_LOG(ERR, "failed to get VLAN offload v2 capabilities");
2367 goto err_rss;
2368 }
2369 }
2370
2371 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
2372 bufsz = sizeof(struct virtchnl_qos_cap_list) +
2373 IAVF_MAX_TRAFFIC_CLASS *
2374 sizeof(struct virtchnl_qos_cap_elem);
2375 vf->qos_cap = rte_zmalloc("qos_cap", bufsz, 0);
2376 if (!vf->qos_cap) {
2377 PMD_INIT_LOG(ERR, "unable to allocate qos_cap memory");
2378 goto err_rss;
2379 }
2380 iavf_tm_conf_init(dev);
2381 }
2382
2383 iavf_init_proto_xtr(dev);
2384
2385 return 0;
2386 err_rss:
2387 rte_free(vf->rss_key);
2388 rte_free(vf->rss_lut);
2389 err_alloc:
2390 rte_free(vf->qos_cap);
2391 rte_free(vf->vf_res);
2392 vf->vsi_res = NULL;
2393 err_api:
2394 rte_free(vf->aq_resp);
2395 err_aq:
2396 iavf_shutdown_adminq(hw);
2397 err:
2398 return -1;
2399 }
2400
2401 static void
2402 iavf_uninit_vf(struct rte_eth_dev *dev)
2403 {
2404 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2405 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2406
2407 iavf_shutdown_adminq(hw);
2408
2409 rte_free(vf->vf_res);
2410 vf->vsi_res = NULL;
2411 vf->vf_res = NULL;
2412
2413 rte_free(vf->aq_resp);
2414 vf->aq_resp = NULL;
2415
2416 rte_free(vf->qos_cap);
2417 vf->qos_cap = NULL;
2418
2419 rte_free(vf->rss_lut);
2420 vf->rss_lut = NULL;
2421 rte_free(vf->rss_key);
2422 vf->rss_key = NULL;
2423 }
2424
2425 /* Enable default admin queue interrupt setting */
2426 static inline void
2427 iavf_enable_irq0(struct iavf_hw *hw)
2428 {
2429 /* Enable admin queue interrupt trigger */
2430 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1,
2431 IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
2432
2433 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
2434 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
2435 IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
2436 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
2437
2438 IAVF_WRITE_FLUSH(hw);
2439 }
2440
2441 static inline void
2442 iavf_disable_irq0(struct iavf_hw *hw)
2443 {
2444 /* Disable all interrupt types */
2445 IAVF_WRITE_REG(hw, IAVF_VFINT_ICR0_ENA1, 0);
2446 IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
2447 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
2448 IAVF_WRITE_FLUSH(hw);
2449 }
2450
2451 static void
2452 iavf_dev_interrupt_handler(void *param)
2453 {
2454 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2455 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2456
2457 iavf_disable_irq0(hw);
2458
2459 iavf_handle_virtchnl_msg(dev);
2460
2461 iavf_enable_irq0(hw);
2462 }
2463
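/*
 * Periodic alarm used instead of an MSI-X interrupt for the admin queue: when
 * the VF does not advertise VIRTCHNL_VF_OFFLOAD_WB_ON_ITR (see iavf_dev_init()
 * below), virtchnl messages are polled every IAVF_ALARM_INTERVAL and the
 * alarm re-arms itself.
 */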
2464 void
2465 iavf_dev_alarm_handler(void *param)
2466 {
2467 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2468 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2469 uint32_t icr0;
2470
2471 iavf_disable_irq0(hw);
2472
2473 /* read out interrupt causes */
2474 icr0 = IAVF_READ_REG(hw, IAVF_VFINT_ICR01);
2475
2476 if (icr0 & IAVF_VFINT_ICR01_ADMINQ_MASK) {
2477 PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
2478 iavf_handle_virtchnl_msg(dev);
2479 }
2480
2481 iavf_enable_irq0(hw);
2482
2483 rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
2484 iavf_dev_alarm_handler, dev);
2485 }
2486
2487 static int
2488 iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
2489 const struct rte_flow_ops **ops)
2490 {
2491 if (!dev)
2492 return -EINVAL;
2493
2494 *ops = &iavf_flow_ops;
2495 return 0;
2496 }
2497
2498 static void
2499 iavf_default_rss_disable(struct iavf_adapter *adapter)
2500 {
2501 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2502 int ret = 0;
2503
2504 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2505 /* Set hena = 0 to ask PF to cleanup all existing RSS. */
2506 ret = iavf_set_hena(adapter, 0);
2507 if (ret)
2508 /* It is a workaround, temporarily allow error to be
2509 * returned due to possible lack of PF handling for
2510 * hena = 0.
2511 */
2512 PMD_INIT_LOG(WARNING, "failed to disable default RSS, "
2513 "possibly due to lack of PF support");
2514 }
2515 }
2516
2517 static int
2518 iavf_dev_init(struct rte_eth_dev *eth_dev)
2519 {
2520 struct iavf_adapter *adapter =
2521 IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
2522 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
2523 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
2524 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2525 int ret = 0;
2526
2527 PMD_INIT_FUNC_TRACE();
2528
2529 /* assign ops func pointer */
2530 eth_dev->dev_ops = &iavf_eth_dev_ops;
2531 eth_dev->rx_queue_count = iavf_dev_rxq_count;
2532 eth_dev->rx_descriptor_status = iavf_dev_rx_desc_status;
2533 eth_dev->tx_descriptor_status = iavf_dev_tx_desc_status;
2534 eth_dev->rx_pkt_burst = &iavf_recv_pkts;
2535 eth_dev->tx_pkt_burst = &iavf_xmit_pkts;
2536 eth_dev->tx_pkt_prepare = &iavf_prep_pkts;
2537
2538 /* For secondary processes, we don't initialise any further as primary
2539 * has already done this work. Only check if we need a different RX
2540 * and TX function.
2541 */
2542 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2543 iavf_set_rx_function(eth_dev);
2544 iavf_set_tx_function(eth_dev);
2545 return 0;
2546 }
2547 rte_eth_copy_pci_info(eth_dev, pci_dev);
2548
2549 hw->vendor_id = pci_dev->id.vendor_id;
2550 hw->device_id = pci_dev->id.device_id;
2551 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2552 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2553 hw->bus.bus_id = pci_dev->addr.bus;
2554 hw->bus.device = pci_dev->addr.devid;
2555 hw->bus.func = pci_dev->addr.function;
2556 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
2557 hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
2558 adapter->dev_data = eth_dev->data;
2559 adapter->stopped = 1;
2560
2561 if (iavf_init_vf(eth_dev) != 0) {
2562 PMD_INIT_LOG(ERR, "Init vf failed");
2563 return -1;
2564 }
2565
2566 /* set default ptype table */
2567 iavf_set_default_ptype_table(eth_dev);
2568
2569 /* copy mac addr */
2570 eth_dev->data->mac_addrs = rte_zmalloc(
2571 "iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
2572 if (!eth_dev->data->mac_addrs) {
2573 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
2574 " store MAC addresses",
2575 RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
2576 ret = -ENOMEM;
2577 goto init_vf_err;
2578 }
2579 /* If the MAC address is not configured by host,
2580 * generate a random one.
2581 */
2582 if (!rte_is_valid_assigned_ether_addr(
2583 (struct rte_ether_addr *)hw->mac.addr))
2584 rte_eth_random_addr(hw->mac.addr);
2585 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
2586 &eth_dev->data->mac_addrs[0]);
2587
2588 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
2589 /* register callback func to eal lib */
2590 rte_intr_callback_register(pci_dev->intr_handle,
2591 iavf_dev_interrupt_handler,
2592 (void *)eth_dev);
2593
2594 /* enable uio intr after callback register */
2595 rte_intr_enable(pci_dev->intr_handle);
2596 } else {
2597 rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
2598 iavf_dev_alarm_handler, eth_dev);
2599 }
2600
2601 /* configure and enable device interrupt */
2602 iavf_enable_irq0(hw);
2603
2604 ret = iavf_flow_init(adapter);
2605 if (ret) {
2606 PMD_INIT_LOG(ERR, "Failed to initialize flow");
2607 goto flow_init_err;
2608 }
2609
2610 /** Check if the IPsec Crypto offload is supported and create
2611 * security_ctx if it is.
2612 */
2613 if (iavf_ipsec_crypto_supported(adapter)) {
2614 /* Initialize security_ctx only for primary process */
2615 ret = iavf_security_ctx_create(adapter);
2616 if (ret) {
2617 PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
2618 return ret;
2619 }
2620
2621 ret = iavf_security_init(adapter);
2622 if (ret) {
2623 PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
2624 return ret;
2625 }
2626 }
2627
2628 iavf_default_rss_disable(adapter);
2629
2630
2631 /* Start device watchdog */
2632 iavf_dev_watchdog_enable(adapter);
2633
2634
2635 return 0;
2636
2637 flow_init_err:
2638 rte_free(eth_dev->data->mac_addrs);
2639 eth_dev->data->mac_addrs = NULL;
2640
2641 init_vf_err:
2642 iavf_uninit_vf(eth_dev);
2643
2644 return ret;
2645 }
2646
2647 static int
2648 iavf_dev_close(struct rte_eth_dev *dev)
2649 {
2650 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2651 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2652 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2653 struct iavf_adapter *adapter =
2654 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2655 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2656 int ret;
2657
2658 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2659 return 0;
2660
2661 ret = iavf_dev_stop(dev);
2662
2663 iavf_flow_flush(dev, NULL);
2664 iavf_flow_uninit(adapter);
2665
2666 /*
2667 * Disable promiscuous mode before resetting the VF. This is a
2668 * workaround for operation with the kernel PF driver and is not
2669 * the normal flow.
2670 */
2671 if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled)
2672 iavf_config_promisc(adapter, false, false);
2673
2674 iavf_shutdown_adminq(hw);
2675 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
2676 /* disable uio intr before callback unregister */
2677 rte_intr_disable(intr_handle);
2678
2679 /* unregister callback func from eal lib */
2680 rte_intr_callback_unregister(intr_handle,
2681 iavf_dev_interrupt_handler, dev);
2682 } else {
2683 rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
2684 }
2685 iavf_disable_irq0(hw);
2686
2687 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
2688 iavf_tm_conf_uninit(dev);
2689
2690 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2691 if (vf->rss_lut) {
2692 rte_free(vf->rss_lut);
2693 vf->rss_lut = NULL;
2694 }
2695 if (vf->rss_key) {
2696 rte_free(vf->rss_key);
2697 vf->rss_key = NULL;
2698 }
2699 }
2700
2701 rte_free(vf->vf_res);
2702 vf->vsi_res = NULL;
2703 vf->vf_res = NULL;
2704
2705 rte_free(vf->aq_resp);
2706 vf->aq_resp = NULL;
2707
2708 /*
2709 * If the VF is reset via VFLR, the device will be knocked out of bus
2710 * master mode, and the driver will fail to recover from the reset. Fix
2711 * this by enabling bus mastering after every reset. In a non-VFLR case,
2712 * the bus master bit will not be disabled, and this call will have no
2713 * effect.
2714 */
2715 if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
2716 vf->vf_reset = false;
2717
2718 /* disable watchdog */
2719 iavf_dev_watchdog_disable(adapter);
2720
2721 return ret;
2722 }
2723
2724 static int
2725 iavf_dev_uninit(struct rte_eth_dev *dev)
2726 {
2727 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2728 return -EPERM;
2729
2730 iavf_dev_close(dev);
2731
2732 return 0;
2733 }
2734
2735 /*
2736 * Reset VF device only to re-initialize resources in PMD layer
2737 */
2738 static int
2739 iavf_dev_reset(struct rte_eth_dev *dev)
2740 {
2741 int ret;
2742
2743 ret = iavf_dev_uninit(dev);
2744 if (ret)
2745 return ret;
2746
2747 return iavf_dev_init(dev);
2748 }
2749
2750 static int
2751 iavf_dcf_cap_check_handler(__rte_unused const char *key,
2752 const char *value, __rte_unused void *opaque)
2753 {
2754 if (strcmp(value, "dcf"))
2755 return -1;
2756
2757 return 0;
2758 }
2759
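/*
 * When the device is probed with the "cap=dcf" devarg (illustrative example:
 * -a 18:01.0,cap=dcf), this PMD does not take the device so that the DCF
 * driver can claim it instead; see eth_iavf_pci_probe() below.
 */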
2760 static int
2761 iavf_dcf_cap_selected(struct rte_devargs *devargs)
2762 {
2763 struct rte_kvargs *kvlist;
2764 const char *key = "cap";
2765 int ret = 0;
2766
2767 if (devargs == NULL)
2768 return 0;
2769
2770 kvlist = rte_kvargs_parse(devargs->args, NULL);
2771 if (kvlist == NULL)
2772 return 0;
2773
2774 if (!rte_kvargs_count(kvlist, key))
2775 goto exit;
2776
2777 /* dcf capability selected when there's a key-value pair: cap=dcf */
2778 if (rte_kvargs_process(kvlist, key,
2779 iavf_dcf_cap_check_handler, NULL) < 0)
2780 goto exit;
2781
2782 ret = 1;
2783
2784 exit:
2785 rte_kvargs_free(kvlist);
2786 return ret;
2787 }
2788
2789 static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2790 struct rte_pci_device *pci_dev)
2791 {
2792 if (iavf_dcf_cap_selected(pci_dev->device.devargs))
2793 return 1;
2794
2795 return rte_eth_dev_pci_generic_probe(pci_dev,
2796 sizeof(struct iavf_adapter), iavf_dev_init);
2797 }
2798
2799 static int eth_iavf_pci_remove(struct rte_pci_device *pci_dev)
2800 {
2801 return rte_eth_dev_pci_generic_remove(pci_dev, iavf_dev_uninit);
2802 }
2803
2804 /* Adaptive virtual function driver struct */
2805 static struct rte_pci_driver rte_iavf_pmd = {
2806 .id_table = pci_id_iavf_map,
2807 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2808 .probe = eth_iavf_pci_probe,
2809 .remove = eth_iavf_pci_remove,
2810 };
2811
2812 RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
2813 RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
2814 RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
2815 RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
2816 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_init, init, NOTICE);
2817 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_driver, driver, NOTICE);
2818 #ifdef RTE_ETHDEV_DEBUG_RX
2819 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_rx, rx, DEBUG);
2820 #endif
2821 #ifdef RTE_ETHDEV_DEBUG_TX
2822 RTE_LOG_REGISTER_SUFFIX(iavf_logtype_tx, tx, DEBUG);
2823 #endif
2824