1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
3 */
4
5 #include <errno.h>
6 #include <stdbool.h>
7 #include <sys/queue.h>
8 #include <sys/types.h>
9 #include <unistd.h>
10
11 #include <rte_interrupts.h>
12 #include <rte_debug.h>
13 #include <rte_pci.h>
14 #include <rte_atomic.h>
15 #include <rte_eal.h>
16 #include <rte_ether.h>
17 #include <ethdev_pci.h>
18 #include <rte_kvargs.h>
19 #include <rte_malloc.h>
20 #include <rte_memzone.h>
21 #include <rte_dev.h>
22
23 #include <iavf_devids.h>
24
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27 #include "ice_rxtx.h"
28
29 #define DCF_NUM_MACADDR_MAX 64
30
31 static int dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw,
32 struct rte_ether_addr *mc_addrs,
33 uint32_t mc_addrs_num, bool add);
34
35 static int
36 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
37 struct rte_eth_udp_tunnel *udp_tunnel);
38 static int
39 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
40 struct rte_eth_udp_tunnel *udp_tunnel);
41
42 static int
43 ice_dcf_dev_init(struct rte_eth_dev *eth_dev);
44
45 static int
46 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev);
47
48 struct rte_ice_dcf_xstats_name_off {
49 char name[RTE_ETH_XSTATS_NAME_SIZE];
50 unsigned int offset;
51 };
52
53 static const struct rte_ice_dcf_xstats_name_off rte_ice_dcf_stats_strings[] = {
54 {"rx_bytes", offsetof(struct ice_dcf_eth_stats, rx_bytes)},
55 {"rx_unicast_packets", offsetof(struct ice_dcf_eth_stats, rx_unicast)},
56 {"rx_multicast_packets", offsetof(struct ice_dcf_eth_stats, rx_multicast)},
57 {"rx_broadcast_packets", offsetof(struct ice_dcf_eth_stats, rx_broadcast)},
58 {"rx_dropped_packets", offsetof(struct ice_dcf_eth_stats, rx_discards)},
59 {"rx_unknown_protocol_packets", offsetof(struct ice_dcf_eth_stats,
60 rx_unknown_protocol)},
61 {"tx_bytes", offsetof(struct ice_dcf_eth_stats, tx_bytes)},
62 {"tx_unicast_packets", offsetof(struct ice_dcf_eth_stats, tx_unicast)},
63 {"tx_multicast_packets", offsetof(struct ice_dcf_eth_stats, tx_multicast)},
64 {"tx_broadcast_packets", offsetof(struct ice_dcf_eth_stats, tx_broadcast)},
65 {"tx_dropped_packets", offsetof(struct ice_dcf_eth_stats, tx_discards)},
66 {"tx_error_packets", offsetof(struct ice_dcf_eth_stats, tx_errors)},
67 };
68
69 #define ICE_DCF_NB_XSTATS (sizeof(rte_ice_dcf_stats_strings) / \
70 sizeof(rte_ice_dcf_stats_strings[0]))
71
72 static uint16_t
73 ice_dcf_recv_pkts(__rte_unused void *rx_queue,
74 __rte_unused struct rte_mbuf **bufs,
75 __rte_unused uint16_t nb_pkts)
76 {
77 return 0;
78 }
79
80 static uint16_t
81 ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
82 __rte_unused struct rte_mbuf **bufs,
83 __rte_unused uint16_t nb_pkts)
84 {
85 return 0;
86 }
87
88 static int
89 ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
90 {
91 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
92 struct rte_eth_dev_data *dev_data = dev->data;
93 struct iavf_hw *hw = &dcf_ad->real_hw.avf;
94 uint16_t buf_size, max_pkt_len;
95
96 buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
97 rxq->rx_hdr_len = 0;
98 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
99 max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
100 dev->data->mtu + ICE_ETH_OVERHEAD);
101
102 /* Check maximum packet length is set correctly. */
103 if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
104 max_pkt_len > ICE_FRAME_SIZE_MAX) {
105 PMD_DRV_LOG(ERR, "maximum packet length must be "
106 "larger than %u and smaller than %u",
107 (uint32_t)RTE_ETHER_MIN_LEN,
108 (uint32_t)ICE_FRAME_SIZE_MAX);
109 return -EINVAL;
110 }
111
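/*
 * Illustrative example (assuming a default 2048-byte mempool data room):
 * buf_size = 2048 - RTE_PKTMBUF_HEADROOM (128) = 1920 bytes. If
 * max_pkt_len + 2 * RTE_VLAN_HLEN (8 bytes) exceeds that, or the
 * application requested RTE_ETH_RX_OFFLOAD_SCATTER, received frames are
 * spread across multiple mbufs (scattered Rx).
 */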
112 rxq->max_pkt_len = max_pkt_len;
113 if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
114 (rxq->max_pkt_len + 2 * RTE_VLAN_HLEN) > buf_size) {
115 dev_data->scattered_rx = 1;
116 }
117 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
118 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
119 IAVF_WRITE_FLUSH(hw);
120
121 return 0;
122 }
123
124 static int
125 ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
126 {
127 struct ice_rx_queue **rxq =
128 (struct ice_rx_queue **)dev->data->rx_queues;
129 int i, ret;
130
131 for (i = 0; i < dev->data->nb_rx_queues; i++) {
132 if (!rxq[i] || !rxq[i]->q_set)
133 continue;
134 ret = ice_dcf_init_rxq(dev, rxq[i]);
135 if (ret)
136 return ret;
137 }
138
139 ice_set_rx_function(dev);
140 ice_set_tx_function(dev);
141
142 return 0;
143 }
144
145 #define IAVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
146 #define IAVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
147
148 #define IAVF_ITR_INDEX_DEFAULT 0
149 #define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
150 #define IAVF_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
151
152 static inline uint16_t
153 iavf_calc_itr_interval(int16_t interval)
154 {
155 if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
156 interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
157
158 /* Convert to hardware count, as writing each 1 represents 2 us */
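/* e.g. an input of 8160 us (IAVF_QUEUE_ITR_INTERVAL_MAX) is written as 4080,
 * and the out-of-range fallback of 32 us is written as 16.
 */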
159 return interval / 2;
160 }
161
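/*
 * Map Rx queues to MSI-X vectors. Three cases are handled below:
 * 1) Rx interrupts disabled: a single vector is used only for descriptor
 *    write-back (WB_ON_ITR when supported, otherwise an interrupt with the
 *    ITR set to its maximum).
 * 2) Rx interrupts enabled but only one vector usable: all queues share the
 *    misc vector.
 * 3) Rx interrupts enabled with multiple vectors: queues are distributed
 *    round-robin over the available vectors.
 */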
162 static int
163 ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
164 struct rte_intr_handle *intr_handle)
165 {
166 struct ice_dcf_adapter *adapter = dev->data->dev_private;
167 struct ice_dcf_hw *hw = &adapter->real_hw;
168 uint16_t interval, i;
169 int vec;
170
171 if (rte_intr_cap_multiple(intr_handle) &&
172 dev->data->dev_conf.intr_conf.rxq) {
173 if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
174 return -1;
175 }
176
177 if (rte_intr_dp_is_en(intr_handle)) {
178 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
179 dev->data->nb_rx_queues)) {
180 PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
181 dev->data->nb_rx_queues);
182 return -1;
183 }
184 }
185
186 if (!dev->data->dev_conf.intr_conf.rxq ||
187 !rte_intr_dp_is_en(intr_handle)) {
188 /* Rx interrupt disabled, map interrupt only for writeback */
189 hw->nb_msix = 1;
190 if (hw->vf_res->vf_cap_flags &
191 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
192 /* If WB_ON_ITR is supported, enable it */
193 hw->msix_base = IAVF_RX_VEC_START;
194 /* Set the ITR for index zero, to 2us to make sure that
195 * we leave time for aggregation to occur, but don't
196 * increase latency dramatically.
197 */
198 IAVF_WRITE_REG(&hw->avf,
199 IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
200 (0 << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
201 IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
202 (2UL << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
203 } else {
204 /* If no WB_ON_ITR offload flags, need to set
205 * interrupt for descriptor write back.
206 */
207 hw->msix_base = IAVF_MISC_VEC_ID;
208
209 /* set ITR to max */
210 interval =
211 iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
212 IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
213 IAVF_VFINT_DYN_CTL01_INTENA_MASK |
214 (IAVF_ITR_INDEX_DEFAULT <<
215 IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
216 (interval <<
217 IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
218 }
219 IAVF_WRITE_FLUSH(&hw->avf);
220 /* map all queues to the same interrupt */
221 for (i = 0; i < dev->data->nb_rx_queues; i++)
222 hw->rxq_map[hw->msix_base] |= 1 << i;
223 } else {
224 if (!rte_intr_allow_others(intr_handle)) {
225 hw->nb_msix = 1;
226 hw->msix_base = IAVF_MISC_VEC_ID;
227 for (i = 0; i < dev->data->nb_rx_queues; i++) {
228 hw->rxq_map[hw->msix_base] |= 1 << i;
229 rte_intr_vec_list_index_set(intr_handle,
230 i, IAVF_MISC_VEC_ID);
231 }
232 PMD_DRV_LOG(DEBUG,
233 "vector %u is mapped to all Rx queues",
234 hw->msix_base);
235 } else {
236 /* If Rx interrupts are required and multiple
237 * interrupt vectors can be used, the queue vectors start from 1
238 */
239 hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
240 rte_intr_nb_efd_get(intr_handle));
241 hw->msix_base = IAVF_MISC_VEC_ID;
242 vec = IAVF_MISC_VEC_ID;
243 for (i = 0; i < dev->data->nb_rx_queues; i++) {
244 hw->rxq_map[vec] |= 1 << i;
245 rte_intr_vec_list_index_set(intr_handle,
246 i, vec++);
247 if (vec >= hw->nb_msix)
248 vec = IAVF_RX_VEC_START;
249 }
250 PMD_DRV_LOG(DEBUG,
251 "%u vectors are mapped to %u Rx queues",
252 hw->nb_msix, dev->data->nb_rx_queues);
253 }
254 }
255
256 if (ice_dcf_config_irq_map(hw)) {
257 PMD_DRV_LOG(ERR, "config interrupt mapping failed");
258 return -1;
259 }
260 return 0;
261 }
262
263 static int
264 alloc_rxq_mbufs(struct ice_rx_queue *rxq)
265 {
266 volatile union ice_rx_flex_desc *rxd;
267 struct rte_mbuf *mbuf = NULL;
268 uint64_t dma_addr;
269 uint16_t i;
270
271 for (i = 0; i < rxq->nb_rx_desc; i++) {
272 mbuf = rte_mbuf_raw_alloc(rxq->mp);
273 if (unlikely(!mbuf)) {
274 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
275 return -ENOMEM;
276 }
277
278 rte_mbuf_refcnt_set(mbuf, 1);
279 mbuf->next = NULL;
280 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
281 mbuf->nb_segs = 1;
282 mbuf->port = rxq->port_id;
283
284 dma_addr =
285 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
286
287 rxd = &rxq->rx_ring[i];
288 rxd->read.pkt_addr = dma_addr;
289 rxd->read.hdr_addr = 0;
290 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
291 rxd->read.rsvd1 = 0;
292 rxd->read.rsvd2 = 0;
293 #endif
294
295 rxq->sw_ring[i].mbuf = (void *)mbuf;
296 }
297
298 return 0;
299 }
300
301 static int
302 ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
303 {
304 struct ice_dcf_adapter *ad = dev->data->dev_private;
305 struct iavf_hw *hw = &ad->real_hw.avf;
306 struct ice_rx_queue *rxq;
307 int err = 0;
308
309 if (rx_queue_id >= dev->data->nb_rx_queues)
310 return -EINVAL;
311
312 rxq = dev->data->rx_queues[rx_queue_id];
313
314 err = alloc_rxq_mbufs(rxq);
315 if (err) {
316 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
317 return err;
318 }
319
320 rte_wmb();
321
322 /* Init the RX tail register. */
323 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
324 IAVF_WRITE_FLUSH(hw);
325
326 /* Ready to switch the queue on */
327 err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
328 if (err) {
329 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
330 rx_queue_id);
331 return err;
332 }
333
334 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
335
336 return 0;
337 }
338
339 static inline void
340 reset_rx_queue(struct ice_rx_queue *rxq)
341 {
342 uint16_t len;
343 uint32_t i;
344
345 if (!rxq)
346 return;
347
348 len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;
349
350 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
351 ((volatile char *)rxq->rx_ring)[i] = 0;
352
353 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
354
355 for (i = 0; i < ICE_RX_MAX_BURST; i++)
356 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
357
358 /* for rx bulk */
359 rxq->rx_nb_avail = 0;
360 rxq->rx_next_avail = 0;
361 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
362
363 rxq->rx_tail = 0;
364 rxq->nb_rx_hold = 0;
365 rxq->pkt_first_seg = NULL;
366 rxq->pkt_last_seg = NULL;
367 }
368
369 static inline void
370 reset_tx_queue(struct ice_tx_queue *txq)
371 {
372 struct ice_tx_entry *txe;
373 uint32_t i, size;
374 uint16_t prev;
375
376 if (!txq) {
377 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
378 return;
379 }
380
381 txe = txq->sw_ring;
382 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
383 for (i = 0; i < size; i++)
384 ((volatile char *)txq->tx_ring)[i] = 0;
385
386 prev = (uint16_t)(txq->nb_tx_desc - 1);
387 for (i = 0; i < txq->nb_tx_desc; i++) {
388 txq->tx_ring[i].cmd_type_offset_bsz =
389 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
390 txe[i].mbuf = NULL;
391 txe[i].last_id = i;
392 txe[prev].next_id = i;
393 prev = i;
394 }
395
396 txq->tx_tail = 0;
397 txq->nb_tx_used = 0;
398
399 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
400 txq->nb_tx_free = txq->nb_tx_desc - 1;
401
402 txq->tx_next_dd = txq->tx_rs_thresh - 1;
403 txq->tx_next_rs = txq->tx_rs_thresh - 1;
404 }
405
406 static int
407 ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
408 {
409 struct ice_dcf_adapter *ad = dev->data->dev_private;
410 struct ice_dcf_hw *hw = &ad->real_hw;
411 struct ice_rx_queue *rxq;
412 int err;
413
414 if (rx_queue_id >= dev->data->nb_rx_queues)
415 return -EINVAL;
416
417 err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
418 if (err) {
419 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
420 rx_queue_id);
421 return err;
422 }
423
424 rxq = dev->data->rx_queues[rx_queue_id];
425 rxq->rx_rel_mbufs(rxq);
426 reset_rx_queue(rxq);
427 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
428
429 return 0;
430 }
431
432 static int
433 ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
434 {
435 struct ice_dcf_adapter *ad = dev->data->dev_private;
436 struct iavf_hw *hw = &ad->real_hw.avf;
437 struct ice_tx_queue *txq;
438 int err = 0;
439
440 if (tx_queue_id >= dev->data->nb_tx_queues)
441 return -EINVAL;
442
443 txq = dev->data->tx_queues[tx_queue_id];
444
445 /* Init the TX tail register. */
446 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
447 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
448 IAVF_WRITE_FLUSH(hw);
449
450 /* Ready to switch the queue on */
451 err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
452
453 if (err) {
454 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
455 tx_queue_id);
456 return err;
457 }
458
459 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
460
461 return 0;
462 }
463
464 static int
465 ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
466 {
467 struct ice_dcf_adapter *ad = dev->data->dev_private;
468 struct ice_dcf_hw *hw = &ad->real_hw;
469 struct ice_tx_queue *txq;
470 int err;
471
472 if (tx_queue_id >= dev->data->nb_tx_queues)
473 return -EINVAL;
474
475 err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
476 if (err) {
477 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
478 tx_queue_id);
479 return err;
480 }
481
482 txq = dev->data->tx_queues[tx_queue_id];
483 txq->tx_rel_mbufs(txq);
484 reset_tx_queue(txq);
485 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
486
487 return 0;
488 }
489
490 static int
491 ice_dcf_start_queues(struct rte_eth_dev *dev)
492 {
493 struct ice_rx_queue *rxq;
494 struct ice_tx_queue *txq;
495 int nb_rxq = 0;
496 int nb_txq, i;
497
498 for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
499 txq = dev->data->tx_queues[nb_txq];
500 if (txq->tx_deferred_start)
501 continue;
502 if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
503 PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_txq);
504 goto tx_err;
505 }
506 }
507
508 for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
509 rxq = dev->data->rx_queues[nb_rxq];
510 if (rxq->rx_deferred_start)
511 continue;
512 if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
513 PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_rxq);
514 goto rx_err;
515 }
516 }
517
518 return 0;
519
520 /* stop the started queues if we failed to start all queues */
521 rx_err:
522 for (i = 0; i < nb_rxq; i++)
523 ice_dcf_rx_queue_stop(dev, i);
524 tx_err:
525 for (i = 0; i < nb_txq; i++)
526 ice_dcf_tx_queue_stop(dev, i);
527
528 return -1;
529 }
530
531 static int
532 ice_dcf_dev_start(struct rte_eth_dev *dev)
533 {
534 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
535 struct rte_intr_handle *intr_handle = dev->intr_handle;
536 struct ice_adapter *ad = &dcf_ad->parent;
537 struct ice_dcf_hw *hw = &dcf_ad->real_hw;
538 int ret;
539
540 if (hw->resetting) {
541 PMD_DRV_LOG(ERR,
542 "The DCF has been reset by PF, please reinit first");
543 return -EIO;
544 }
545
546 if (hw->tm_conf.root && !hw->tm_conf.committed) {
547 PMD_DRV_LOG(ERR,
548 "please call hierarchy_commit() before starting the port");
549 return -EIO;
550 }
551
552 ad->pf.adapter_stopped = 0;
553
554 hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
555 dev->data->nb_tx_queues);
556
557 ret = ice_dcf_init_rx_queues(dev);
558 if (ret) {
559 PMD_DRV_LOG(ERR, "Fail to init queues");
560 return ret;
561 }
562
563 if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
564 ret = ice_dcf_init_rss(hw);
565 if (ret) {
566 PMD_DRV_LOG(ERR, "Failed to configure RSS");
567 return ret;
568 }
569 }
570
571 ret = ice_dcf_configure_queues(hw);
572 if (ret) {
573 PMD_DRV_LOG(ERR, "Fail to config queues");
574 return ret;
575 }
576
577 ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
578 if (ret) {
579 PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
580 return ret;
581 }
582
583 if (dev->data->dev_conf.intr_conf.rxq != 0) {
584 rte_intr_disable(intr_handle);
585 rte_intr_enable(intr_handle);
586 }
587
588 ret = ice_dcf_start_queues(dev);
589 if (ret) {
590 PMD_DRV_LOG(ERR, "Failed to enable queues");
591 return ret;
592 }
593
594 ret = ice_dcf_add_del_all_mac_addr(hw, hw->eth_dev->data->mac_addrs,
595 true, VIRTCHNL_ETHER_ADDR_PRIMARY);
596 if (ret) {
597 PMD_DRV_LOG(ERR, "Failed to add mac addr");
598 return ret;
599 }
600
601 if (dcf_ad->mc_addrs_num) {
602 /* flush previous addresses */
603 ret = dcf_add_del_mc_addr_list(hw, dcf_ad->mc_addrs,
604 dcf_ad->mc_addrs_num, true);
605 if (ret)
606 return ret;
607 }
608
609
610 dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
611
612 return 0;
613 }
614
615 static void
616 ice_dcf_stop_queues(struct rte_eth_dev *dev)
617 {
618 struct ice_dcf_adapter *ad = dev->data->dev_private;
619 struct ice_dcf_hw *hw = &ad->real_hw;
620 struct ice_rx_queue *rxq;
621 struct ice_tx_queue *txq;
622 int ret, i;
623
624 /* Stop All queues */
625 ret = ice_dcf_disable_queues(hw);
626 if (ret)
627 PMD_DRV_LOG(WARNING, "Fail to stop queues");
628
629 for (i = 0; i < dev->data->nb_tx_queues; i++) {
630 txq = dev->data->tx_queues[i];
631 if (!txq)
632 continue;
633 txq->tx_rel_mbufs(txq);
634 reset_tx_queue(txq);
635 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
636 }
637 for (i = 0; i < dev->data->nb_rx_queues; i++) {
638 rxq = dev->data->rx_queues[i];
639 if (!rxq)
640 continue;
641 rxq->rx_rel_mbufs(rxq);
642 reset_rx_queue(rxq);
643 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
644 }
645 }
646
647 static int
648 ice_dcf_dev_stop(struct rte_eth_dev *dev)
649 {
650 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
651 struct rte_intr_handle *intr_handle = dev->intr_handle;
652 struct ice_adapter *ad = &dcf_ad->parent;
653 struct ice_dcf_hw *hw = &dcf_ad->real_hw;
654
655 if (ad->pf.adapter_stopped == 1) {
656 PMD_DRV_LOG(DEBUG, "Port is already stopped");
657 return 0;
658 }
659
660 /* Stop the VF representors for this device */
661 ice_dcf_vf_repr_stop_all(dcf_ad);
662
663 ice_dcf_stop_queues(dev);
664
665 rte_intr_efd_disable(intr_handle);
666 rte_intr_vec_list_free(intr_handle);
667
668 ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw,
669 dcf_ad->real_hw.eth_dev->data->mac_addrs,
670 false, VIRTCHNL_ETHER_ADDR_PRIMARY);
671
672 if (dcf_ad->mc_addrs_num)
673 /* flush previous addresses */
674 (void)dcf_add_del_mc_addr_list(&dcf_ad->real_hw,
675 dcf_ad->mc_addrs,
676 dcf_ad->mc_addrs_num, false);
677
678 dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
679 ad->pf.adapter_stopped = 1;
680 hw->tm_conf.committed = false;
681
682 return 0;
683 }
684
685 static int
686 ice_dcf_dev_configure(struct rte_eth_dev *dev)
687 {
688 struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
689 struct ice_adapter *ad = &dcf_ad->parent;
690
691 ad->rx_bulk_alloc_allowed = true;
692 ad->tx_simple_allowed = true;
693
694 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
695 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
696
697 return 0;
698 }
699
700 static int
701 ice_dcf_dev_info_get(struct rte_eth_dev *dev,
702 struct rte_eth_dev_info *dev_info)
703 {
704 struct ice_dcf_adapter *adapter = dev->data->dev_private;
705 struct ice_dcf_hw *hw = &adapter->real_hw;
706
707 dev_info->max_mac_addrs = DCF_NUM_MACADDR_MAX;
708 dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
709 dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
710 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
711 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
712 dev_info->hash_key_size = hw->vf_res->rss_key_size;
713 dev_info->reta_size = hw->vf_res->rss_lut_size;
714 dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
715 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
716
717 dev_info->rx_offload_capa =
718 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
719 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
720 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
721 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
722 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
723 RTE_ETH_RX_OFFLOAD_SCATTER |
724 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
725 RTE_ETH_RX_OFFLOAD_RSS_HASH;
726 dev_info->tx_offload_capa =
727 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
728 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
729 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
730 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
731 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
732 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
733 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
734 RTE_ETH_TX_OFFLOAD_TCP_TSO |
735 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
736 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
737 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
738 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
739 RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
740
741 dev_info->default_rxconf = (struct rte_eth_rxconf) {
742 .rx_thresh = {
743 .pthresh = ICE_DEFAULT_RX_PTHRESH,
744 .hthresh = ICE_DEFAULT_RX_HTHRESH,
745 .wthresh = ICE_DEFAULT_RX_WTHRESH,
746 },
747 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
748 .rx_drop_en = 0,
749 .offloads = 0,
750 };
751
752 dev_info->default_txconf = (struct rte_eth_txconf) {
753 .tx_thresh = {
754 .pthresh = ICE_DEFAULT_TX_PTHRESH,
755 .hthresh = ICE_DEFAULT_TX_HTHRESH,
756 .wthresh = ICE_DEFAULT_TX_WTHRESH,
757 },
758 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
759 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
760 .offloads = 0,
761 };
762
763 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
764 .nb_max = ICE_MAX_RING_DESC,
765 .nb_min = ICE_MIN_RING_DESC,
766 .nb_align = ICE_ALIGN_RING_DESC,
767 };
768
769 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
770 .nb_max = ICE_MAX_RING_DESC,
771 .nb_min = ICE_MIN_RING_DESC,
772 .nb_align = ICE_ALIGN_RING_DESC,
773 };
774
775 return 0;
776 }
777
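/*
 * Program unicast/multicast promiscuous mode on the DCF VSI through the
 * VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE message, and cache the result in the
 * adapter so repeated enable/disable calls can be short-circuited.
 */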
778 static int
779 dcf_config_promisc(struct ice_dcf_adapter *adapter,
780 bool enable_unicast,
781 bool enable_multicast)
782 {
783 struct ice_dcf_hw *hw = &adapter->real_hw;
784 struct virtchnl_promisc_info promisc;
785 struct dcf_virtchnl_cmd args;
786 int err;
787
788 promisc.flags = 0;
789 promisc.vsi_id = hw->vsi_res->vsi_id;
790
791 if (enable_unicast)
792 promisc.flags |= FLAG_VF_UNICAST_PROMISC;
793
794 if (enable_multicast)
795 promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
796
797 memset(&args, 0, sizeof(args));
798 args.v_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
799 args.req_msg = (uint8_t *)&promisc;
800 args.req_msglen = sizeof(promisc);
801
802 err = ice_dcf_execute_virtchnl_cmd(hw, &args);
803 if (err) {
804 PMD_DRV_LOG(ERR,
805 "fail to execute command VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE");
806 return err;
807 }
808
809 adapter->promisc_unicast_enabled = enable_unicast;
810 adapter->promisc_multicast_enabled = enable_multicast;
811 return 0;
812 }
813
814 static int
815 ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
816 {
817 struct ice_dcf_adapter *adapter = dev->data->dev_private;
818
819 if (adapter->promisc_unicast_enabled) {
820 PMD_DRV_LOG(INFO, "promiscuous has been enabled");
821 return 0;
822 }
823
824 return dcf_config_promisc(adapter, true,
825 adapter->promisc_multicast_enabled);
826 }
827
828 static int
829 ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
830 {
831 struct ice_dcf_adapter *adapter = dev->data->dev_private;
832
833 if (!adapter->promisc_unicast_enabled) {
834 PMD_DRV_LOG(INFO, "promiscuous has been disabled");
835 return 0;
836 }
837
838 return dcf_config_promisc(adapter, false,
839 adapter->promisc_multicast_enabled);
840 }
841
842 static int
843 ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
844 {
845 struct ice_dcf_adapter *adapter = dev->data->dev_private;
846
847 if (adapter->promisc_multicast_enabled) {
848 PMD_DRV_LOG(INFO, "allmulticast has been enabled");
849 return 0;
850 }
851
852 return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled,
853 true);
854 }
855
856 static int
857 ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
858 {
859 struct ice_dcf_adapter *adapter = dev->data->dev_private;
860
861 if (!adapter->promisc_multicast_enabled) {
862 PMD_DRV_LOG(INFO, "allmulticast has been disabled");
863 return 0;
864 }
865
866 return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled,
867 false);
868 }
869
870 static int
871 dcf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
872 __rte_unused uint32_t index,
873 __rte_unused uint32_t pool)
874 {
875 struct ice_dcf_adapter *adapter = dev->data->dev_private;
876 int err;
877
878 if (rte_is_zero_ether_addr(addr)) {
879 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
880 return -EINVAL;
881 }
882
883 err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, true,
884 VIRTCHNL_ETHER_ADDR_EXTRA);
885 if (err) {
886 PMD_DRV_LOG(ERR, "fail to add MAC address");
887 return err;
888 }
889
890 return 0;
891 }
892
893 static void
894 dcf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
895 {
896 struct ice_dcf_adapter *adapter = dev->data->dev_private;
897 struct rte_ether_addr *addr = &dev->data->mac_addrs[index];
898 int err;
899
900 err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, false,
901 VIRTCHNL_ETHER_ADDR_EXTRA);
902 if (err)
903 PMD_DRV_LOG(ERR, "fail to remove MAC address");
904 }
905
906 static int
907 dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw,
908 struct rte_ether_addr *mc_addrs,
909 uint32_t mc_addrs_num, bool add)
910 {
911 struct virtchnl_ether_addr_list *list;
912 struct dcf_virtchnl_cmd args;
913 uint32_t i;
914 int len, err = 0;
915
916 len = sizeof(struct virtchnl_ether_addr_list);
917 len += sizeof(struct virtchnl_ether_addr) * mc_addrs_num;
918
919 list = rte_zmalloc(NULL, len, 0);
920 if (!list) {
921 PMD_DRV_LOG(ERR, "fail to allocate memory");
922 return -ENOMEM;
923 }
924
925 for (i = 0; i < mc_addrs_num; i++) {
926 memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
927 sizeof(list->list[i].addr));
928 list->list[i].type = VIRTCHNL_ETHER_ADDR_EXTRA;
929 }
930
931 list->vsi_id = hw->vsi_res->vsi_id;
932 list->num_elements = mc_addrs_num;
933
934 memset(&args, 0, sizeof(args));
935 args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
936 VIRTCHNL_OP_DEL_ETH_ADDR;
937 args.req_msg = (uint8_t *)list;
938 args.req_msglen = len;
939 err = ice_dcf_execute_virtchnl_cmd(hw, &args);
940 if (err)
941 PMD_DRV_LOG(ERR, "fail to execute command %s",
942 add ? "OP_ADD_ETHER_ADDRESS" :
943 "OP_DEL_ETHER_ADDRESS");
944 rte_free(list);
945 return err;
946 }
947
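/*
 * Replace the multicast filter list: the previously programmed addresses
 * are flushed first, then the new list is added; on failure the old list
 * is restored so the device filter state stays consistent.
 */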
948 static int
949 dcf_set_mc_addr_list(struct rte_eth_dev *dev,
950 struct rte_ether_addr *mc_addrs,
951 uint32_t mc_addrs_num)
952 {
953 struct ice_dcf_adapter *adapter = dev->data->dev_private;
954 struct ice_dcf_hw *hw = &adapter->real_hw;
955 uint32_t i;
956 int ret;
957
958
959 if (mc_addrs_num > DCF_NUM_MACADDR_MAX) {
960 PMD_DRV_LOG(ERR,
961 "can't add more than a limited number (%u) of addresses.",
962 (uint32_t)DCF_NUM_MACADDR_MAX);
963 return -EINVAL;
964 }
965
966 for (i = 0; i < mc_addrs_num; i++) {
967 if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
968 const uint8_t *mac = mc_addrs[i].addr_bytes;
969
970 PMD_DRV_LOG(ERR,
971 "Invalid mac: %02x:%02x:%02x:%02x:%02x:%02x",
972 mac[0], mac[1], mac[2], mac[3], mac[4],
973 mac[5]);
974 return -EINVAL;
975 }
976 }
977
978 if (adapter->mc_addrs_num) {
979 /* flush previous addresses */
980 ret = dcf_add_del_mc_addr_list(hw, adapter->mc_addrs,
981 adapter->mc_addrs_num, false);
982 if (ret)
983 return ret;
984 }
985 if (!mc_addrs_num) {
986 adapter->mc_addrs_num = 0;
987 return 0;
988 }
989
990 /* add new ones */
991 ret = dcf_add_del_mc_addr_list(hw, mc_addrs, mc_addrs_num, true);
992 if (ret) {
993 /* if adding mac address list fails, should add the
994 * previous addresses back.
995 */
996 if (adapter->mc_addrs_num)
997 (void)dcf_add_del_mc_addr_list(hw, adapter->mc_addrs,
998 adapter->mc_addrs_num,
999 true);
1000 return ret;
1001 }
1002 adapter->mc_addrs_num = mc_addrs_num;
1003 memcpy(adapter->mc_addrs,
1004 mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
1005
1006 return 0;
1007 }
1008
1009 static int
1010 dcf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
1011 struct rte_ether_addr *mac_addr)
1012 {
1013 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1014 struct ice_dcf_hw *hw = &adapter->real_hw;
1015 struct rte_ether_addr *old_addr;
1016 int ret;
1017
1018 old_addr = hw->eth_dev->data->mac_addrs;
1019 if (rte_is_same_ether_addr(old_addr, mac_addr))
1020 return 0;
1021
1022 ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, old_addr, false,
1023 VIRTCHNL_ETHER_ADDR_PRIMARY);
1024 if (ret)
1025 PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
1026 " %02X:%02X:%02X:%02X:%02X:%02X",
1027 old_addr->addr_bytes[0],
1028 old_addr->addr_bytes[1],
1029 old_addr->addr_bytes[2],
1030 old_addr->addr_bytes[3],
1031 old_addr->addr_bytes[4],
1032 old_addr->addr_bytes[5]);
1033
1034 ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, mac_addr, true,
1035 VIRTCHNL_ETHER_ADDR_PRIMARY);
1036 if (ret)
1037 PMD_DRV_LOG(ERR, "Fail to add new MAC:"
1038 " %02X:%02X:%02X:%02X:%02X:%02X",
1039 mac_addr->addr_bytes[0],
1040 mac_addr->addr_bytes[1],
1041 mac_addr->addr_bytes[2],
1042 mac_addr->addr_bytes[3],
1043 mac_addr->addr_bytes[4],
1044 mac_addr->addr_bytes[5]);
1045
1046 if (ret)
1047 return -EIO;
1048
1049 rte_ether_addr_copy(mac_addr, hw->eth_dev->data->mac_addrs);
1050 return 0;
1051 }
1052
1053 static int
1054 dcf_add_del_vlan(struct ice_dcf_hw *hw, uint16_t vlanid, bool add)
1055 {
1056 struct virtchnl_vlan_filter_list *vlan_list;
1057 uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
1058 sizeof(uint16_t)];
1059 struct dcf_virtchnl_cmd args;
1060 int err;
1061
1062 vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
1063 vlan_list->vsi_id = hw->vsi_res->vsi_id;
1064 vlan_list->num_elements = 1;
1065 vlan_list->vlan_id[0] = vlanid;
1066
1067 memset(&args, 0, sizeof(args));
1068 args.v_op = add ? VIRTCHNL_OP_ADD_VLAN : VIRTCHNL_OP_DEL_VLAN;
1069 args.req_msg = cmd_buffer;
1070 args.req_msglen = sizeof(cmd_buffer);
1071 err = ice_dcf_execute_virtchnl_cmd(hw, &args);
1072 if (err)
1073 PMD_DRV_LOG(ERR, "fail to execute command %s",
1074 add ? "OP_ADD_VLAN" : "OP_DEL_VLAN");
1075
1076 return err;
1077 }
1078
1079 static int
1080 dcf_enable_vlan_strip(struct ice_dcf_hw *hw)
1081 {
1082 struct dcf_virtchnl_cmd args;
1083 int ret;
1084
1085 memset(&args, 0, sizeof(args));
1086 args.v_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
1087 ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
1088 if (ret)
1089 PMD_DRV_LOG(ERR,
1090 "Failed to execute command of OP_ENABLE_VLAN_STRIPPING");
1091
1092 return ret;
1093 }
1094
1095 static int
1096 dcf_disable_vlan_strip(struct ice_dcf_hw *hw)
1097 {
1098 struct dcf_virtchnl_cmd args;
1099 int ret;
1100
1101 memset(&args, 0, sizeof(args));
1102 args.v_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
1103 ret = ice_dcf_execute_virtchnl_cmd(hw, &args);
1104 if (ret)
1105 PMD_DRV_LOG(ERR,
1106 "Failed to execute command of OP_DISABLE_VLAN_STRIPPING");
1107
1108 return ret;
1109 }
1110
1111 static int
1112 dcf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1113 {
1114 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1115 struct ice_dcf_hw *hw = &adapter->real_hw;
1116 int err;
1117
1118 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1119 return -ENOTSUP;
1120
1121 err = dcf_add_del_vlan(hw, vlan_id, on);
1122 if (err)
1123 return -EIO;
1124 return 0;
1125 }
1126
1127 static int
1128 dcf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1129 {
1130 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1131 struct ice_dcf_hw *hw = &adapter->real_hw;
1132 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1133 int err;
1134
1135 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
1136 return -ENOTSUP;
1137
1138 /* VLAN stripping setting */
1139 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1140 /* Enable or disable VLAN stripping */
1141 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1142 err = dcf_enable_vlan_strip(hw);
1143 else
1144 err = dcf_disable_vlan_strip(hw);
1145
1146 if (err)
1147 return -EIO;
1148 }
1149 return 0;
1150 }
1151
1152 static int
1153 ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
1154 const struct rte_flow_ops **ops)
1155 {
1156 if (!dev)
1157 return -EINVAL;
1158
1159 *ops = &ice_flow_ops;
1160 return 0;
1161 }
1162
1163 static int
1164 ice_dcf_dev_rss_reta_update(struct rte_eth_dev *dev,
1165 struct rte_eth_rss_reta_entry64 *reta_conf,
1166 uint16_t reta_size)
1167 {
1168 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1169 struct ice_dcf_hw *hw = &adapter->real_hw;
1170 uint8_t *lut;
1171 uint16_t i, idx, shift;
1172 int ret;
1173
1174 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1175 return -ENOTSUP;
1176
1177 if (reta_size != hw->vf_res->rss_lut_size) {
1178 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1179 "(%d) doesn't match the number hardware can "
1180 "support (%d)", reta_size, hw->vf_res->rss_lut_size);
1181 return -EINVAL;
1182 }
1183
1184 lut = rte_zmalloc("rss_lut", reta_size, 0);
1185 if (!lut) {
1186 PMD_DRV_LOG(ERR, "No memory can be allocated");
1187 return -ENOMEM;
1188 }
1189 /* store the old lut table temporarily */
1190 rte_memcpy(lut, hw->rss_lut, reta_size);
1191
1192 for (i = 0; i < reta_size; i++) {
1193 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1194 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1195 if (reta_conf[idx].mask & (1ULL << shift))
1196 lut[i] = reta_conf[idx].reta[shift];
1197 }
1198
1199 rte_memcpy(hw->rss_lut, lut, reta_size);
1200 /* send virtchnl ops to configure RSS */
1201 ret = ice_dcf_configure_rss_lut(hw);
1202 if (ret) /* revert back */
1203 rte_memcpy(hw->rss_lut, lut, reta_size);
1204 rte_free(lut);
1205
1206 return ret;
1207 }
1208
1209 static int
1210 ice_dcf_dev_rss_reta_query(struct rte_eth_dev *dev,
1211 struct rte_eth_rss_reta_entry64 *reta_conf,
1212 uint16_t reta_size)
1213 {
1214 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1215 struct ice_dcf_hw *hw = &adapter->real_hw;
1216 uint16_t i, idx, shift;
1217
1218 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1219 return -ENOTSUP;
1220
1221 if (reta_size != hw->vf_res->rss_lut_size) {
1222 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1223 "(%d) doesn't match the number hardware can "
1224 "support (%d)", reta_size, hw->vf_res->rss_lut_size);
1225 return -EINVAL;
1226 }
1227
1228 for (i = 0; i < reta_size; i++) {
1229 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1230 shift = i % RTE_ETH_RETA_GROUP_SIZE;
1231 if (reta_conf[idx].mask & (1ULL << shift))
1232 reta_conf[idx].reta[shift] = hw->rss_lut[i];
1233 }
1234
1235 return 0;
1236 }
1237
1238 static int
1239 ice_dcf_dev_rss_hash_update(struct rte_eth_dev *dev,
1240 struct rte_eth_rss_conf *rss_conf)
1241 {
1242 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1243 struct ice_dcf_hw *hw = &adapter->real_hw;
1244
1245 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1246 return -ENOTSUP;
1247
1248 /* HENA setting, it is enabled by default, no change */
1249 if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
1250 PMD_DRV_LOG(DEBUG, "No key to be configured");
1251 return 0;
1252 } else if (rss_conf->rss_key_len != hw->vf_res->rss_key_size) {
1253 PMD_DRV_LOG(ERR, "The size of hash key configured "
1254 "(%d) doesn't match the size hardware can "
1255 "support (%d)", rss_conf->rss_key_len,
1256 hw->vf_res->rss_key_size);
1257 return -EINVAL;
1258 }
1259
1260 rte_memcpy(hw->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);
1261
1262 return ice_dcf_configure_rss_key(hw);
1263 }
1264
1265 static int
1266 ice_dcf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1267 struct rte_eth_rss_conf *rss_conf)
1268 {
1269 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1270 struct ice_dcf_hw *hw = &adapter->real_hw;
1271
1272 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
1273 return -ENOTSUP;
1274
1275 /* Just set it to default value now. */
1276 rss_conf->rss_hf = ICE_RSS_OFFLOAD_ALL;
1277
1278 if (!rss_conf->rss_key)
1279 return 0;
1280
1281 rss_conf->rss_key_len = hw->vf_res->rss_key_size;
1282 rte_memcpy(rss_conf->rss_key, hw->rss_key, rss_conf->rss_key_len);
1283
1284 return 0;
1285 }
1286
1287 #define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
1288 #define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
1289 #define ICE_DCF_48_BIT_MASK RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)
1290
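/*
 * Counters reported by hardware are 48-bit (or 32-bit) and wrap around;
 * the helpers below return the delta versus the stored offset, handling
 * one wrap. Worked example (illustrative): offset = 0xFFFFFFFFFFF0,
 * stat = 0x10 -> (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20.
 */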
1291 static void
1292 ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
1293 {
1294 if (*stat >= *offset)
1295 *stat = *stat - *offset;
1296 else
1297 *stat = (uint64_t)((*stat +
1298 ((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);
1299
1300 *stat &= ICE_DCF_48_BIT_MASK;
1301 }
1302
1303 static void
1304 ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
1305 {
1306 if (*stat >= *offset)
1307 *stat = (uint64_t)(*stat - *offset);
1308 else
1309 *stat = (uint64_t)((*stat +
1310 ((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
1311 }
1312
1313 static void
1314 ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
1315 struct virtchnl_eth_stats *nes)
1316 {
1317 ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
1318 ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
1319 ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
1320 ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
1321 ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
1322 ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
1323 ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
1324 ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
1325 ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
1326 ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
1327 ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
1328 }
1329
1330
1331 static int
1332 ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1333 {
1334 struct ice_dcf_adapter *ad = dev->data->dev_private;
1335 struct ice_dcf_hw *hw = &ad->real_hw;
1336 struct virtchnl_eth_stats pstats;
1337 int ret;
1338
1339 if (hw->resetting) {
1340 PMD_DRV_LOG(ERR,
1341 "The DCF has been reset by PF, please reinit first");
1342 return -EIO;
1343 }
1344
1345 ret = ice_dcf_query_stats(hw, &pstats);
1346 if (ret == 0) {
1347 ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
1348 stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
1349 pstats.rx_broadcast - pstats.rx_discards;
1350 stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
1351 pstats.tx_unicast;
1352 stats->imissed = pstats.rx_discards;
1353 stats->oerrors = pstats.tx_errors + pstats.tx_discards;
1354 stats->ibytes = pstats.rx_bytes;
1355 stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
1356 stats->obytes = pstats.tx_bytes;
1357 } else {
1358 PMD_DRV_LOG(ERR, "Get statistics failed");
1359 }
1360 return ret;
1361 }
1362
1363 static int
1364 ice_dcf_stats_reset(struct rte_eth_dev *dev)
1365 {
1366 struct ice_dcf_adapter *ad = dev->data->dev_private;
1367 struct ice_dcf_hw *hw = &ad->real_hw;
1368 struct virtchnl_eth_stats pstats;
1369 int ret;
1370
1371 if (hw->resetting)
1372 return 0;
1373
1374 /* read stat values to clear hardware registers */
1375 ret = ice_dcf_query_stats(hw, &pstats);
1376 if (ret != 0)
1377 return ret;
1378
1379 /* set stats offset based on current values */
1380 hw->eth_stats_offset = pstats;
1381
1382 return 0;
1383 }
1384
1385 static int ice_dcf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1386 struct rte_eth_xstat_name *xstats_names,
1387 __rte_unused unsigned int limit)
1388 {
1389 unsigned int i;
1390
1391 if (xstats_names != NULL)
1392 for (i = 0; i < ICE_DCF_NB_XSTATS; i++) {
1393 snprintf(xstats_names[i].name,
1394 sizeof(xstats_names[i].name),
1395 "%s", rte_ice_dcf_stats_strings[i].name);
1396 }
1397 return ICE_DCF_NB_XSTATS;
1398 }
1399
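/*
 * Standard ethdev xstats convention: when the caller's array is too small
 * (n < ICE_DCF_NB_XSTATS), only the required size is returned so the caller
 * can retry with a sufficiently large buffer.
 */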
1400 static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
1401 struct rte_eth_xstat *xstats, unsigned int n)
1402 {
1403 int ret;
1404 unsigned int i;
1405 struct ice_dcf_adapter *adapter =
1406 ICE_DCF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1407 struct ice_dcf_hw *hw = &adapter->real_hw;
1408 struct virtchnl_eth_stats *postats = &hw->eth_stats_offset;
1409 struct virtchnl_eth_stats pnstats;
1410
1411 if (n < ICE_DCF_NB_XSTATS)
1412 return ICE_DCF_NB_XSTATS;
1413
1414 ret = ice_dcf_query_stats(hw, &pnstats);
1415 if (ret != 0)
1416 return 0;
1417
1418 if (!xstats)
1419 return 0;
1420
1421 ice_dcf_update_stats(postats, &pnstats);
1422
1423 /* loop over xstats array and fill in values from pnstats */
1424 for (i = 0; i < ICE_DCF_NB_XSTATS; i++) {
1425 xstats[i].id = i;
1426 xstats[i].value = *(uint64_t *)(((char *)&pnstats) +
1427 rte_ice_dcf_stats_strings[i].offset);
1428 }
1429
1430 return ICE_DCF_NB_XSTATS;
1431 }
1432
1433 static void
1434 ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter)
1435 {
1436 if (dcf_adapter->repr_infos) {
1437 rte_free(dcf_adapter->repr_infos);
1438 dcf_adapter->repr_infos = NULL;
1439 }
1440 }
1441
1442 static int
1443 ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter)
1444 {
1445 dcf_adapter->repr_infos =
1446 rte_calloc("ice_dcf_rep_info",
1447 dcf_adapter->real_hw.num_vfs,
1448 sizeof(dcf_adapter->repr_infos[0]), 0);
1449 if (!dcf_adapter->repr_infos) {
1450 PMD_DRV_LOG(ERR, "Failed to alloc memory for VF representors\n");
1451 return -ENOMEM;
1452 }
1453
1454 return 0;
1455 }
1456
1457 static int
1458 ice_dcf_dev_close(struct rte_eth_dev *dev)
1459 {
1460 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1461
1462 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1463 return 0;
1464
1465 (void)ice_dcf_dev_stop(dev);
1466
1467 ice_free_queues(dev);
1468
1469 ice_dcf_free_repr_info(adapter);
1470 ice_dcf_uninit_parent_adapter(dev);
1471 ice_dcf_uninit_hw(dev, &adapter->real_hw);
1472
1473 return 0;
1474 }
1475
1476 int
1477 ice_dcf_link_update(struct rte_eth_dev *dev,
1478 __rte_unused int wait_to_complete)
1479 {
1480 struct ice_dcf_adapter *ad = dev->data->dev_private;
1481 struct ice_dcf_hw *hw = &ad->real_hw;
1482 struct rte_eth_link new_link;
1483
1484 memset(&new_link, 0, sizeof(new_link));
1485
1486 /* Only read the status info stored in the VF; the info is updated
1487 * when a LINK_CHANGE event is received from the PF via virtchnl.
1488 */
1489 switch (hw->link_speed) {
1490 case 10:
1491 new_link.link_speed = RTE_ETH_SPEED_NUM_10M;
1492 break;
1493 case 100:
1494 new_link.link_speed = RTE_ETH_SPEED_NUM_100M;
1495 break;
1496 case 1000:
1497 new_link.link_speed = RTE_ETH_SPEED_NUM_1G;
1498 break;
1499 case 10000:
1500 new_link.link_speed = RTE_ETH_SPEED_NUM_10G;
1501 break;
1502 case 20000:
1503 new_link.link_speed = RTE_ETH_SPEED_NUM_20G;
1504 break;
1505 case 25000:
1506 new_link.link_speed = RTE_ETH_SPEED_NUM_25G;
1507 break;
1508 case 40000:
1509 new_link.link_speed = RTE_ETH_SPEED_NUM_40G;
1510 break;
1511 case 50000:
1512 new_link.link_speed = RTE_ETH_SPEED_NUM_50G;
1513 break;
1514 case 100000:
1515 new_link.link_speed = RTE_ETH_SPEED_NUM_100G;
1516 break;
1517 default:
1518 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1519 break;
1520 }
1521
1522 new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1523 new_link.link_status = hw->link_up ? RTE_ETH_LINK_UP :
1524 RTE_ETH_LINK_DOWN;
1525 new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
1526 RTE_ETH_LINK_SPEED_FIXED);
1527
1528 return rte_eth_linkstatus_set(dev, &new_link);
1529 }
1530
1531 static int
1532 ice_dcf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
1533 {
1534 /* MTU setting is forbidden if the port is started */
1535 if (dev->data->dev_started != 0) {
1536 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1537 dev->data->port_id);
1538 return -EBUSY;
1539 }
1540
1541 return 0;
1542 }
1543
1544 bool
1545 ice_dcf_adminq_need_retry(struct ice_adapter *ad)
1546 {
1547 return ad->hw.dcf_enabled &&
1548 !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
1549 }
1550
1551 /* Add UDP tunneling port */
1552 static int
1553 ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
1554 struct rte_eth_udp_tunnel *udp_tunnel)
1555 {
1556 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1557 struct ice_adapter *parent_adapter = &adapter->parent;
1558 struct ice_hw *parent_hw = &parent_adapter->hw;
1559 int ret = 0;
1560
1561 if (!udp_tunnel)
1562 return -EINVAL;
1563
1564 switch (udp_tunnel->prot_type) {
1565 case RTE_ETH_TUNNEL_TYPE_VXLAN:
1566 ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
1567 udp_tunnel->udp_port);
1568 break;
1569 case RTE_ETH_TUNNEL_TYPE_ECPRI:
1570 ret = ice_create_tunnel(parent_hw, TNL_ECPRI,
1571 udp_tunnel->udp_port);
1572 break;
1573 default:
1574 PMD_DRV_LOG(ERR, "Invalid tunnel type");
1575 ret = -EINVAL;
1576 break;
1577 }
1578
1579 return ret;
1580 }
1581
1582 /* Delete UDP tunneling port */
1583 static int
1584 ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
1585 struct rte_eth_udp_tunnel *udp_tunnel)
1586 {
1587 struct ice_dcf_adapter *adapter = dev->data->dev_private;
1588 struct ice_adapter *parent_adapter = &adapter->parent;
1589 struct ice_hw *parent_hw = &parent_adapter->hw;
1590 int ret = 0;
1591
1592 if (!udp_tunnel)
1593 return -EINVAL;
1594
1595 switch (udp_tunnel->prot_type) {
1596 case RTE_ETH_TUNNEL_TYPE_VXLAN:
1597 case RTE_ETH_TUNNEL_TYPE_ECPRI:
1598 ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
1599 break;
1600 default:
1601 PMD_DRV_LOG(ERR, "Invalid tunnel type");
1602 ret = -EINVAL;
1603 break;
1604 }
1605
1606 return ret;
1607 }
1608
1609 static int
1610 ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
1611 void *arg)
1612 {
1613 if (!arg)
1614 return -EINVAL;
1615
1616 *(const void **)arg = &ice_dcf_tm_ops;
1617
1618 return 0;
1619 }
1620
1621 static inline void
1622 ice_dcf_reset_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
1623 {
1624 ice_dcf_uninit_hw(eth_dev, hw);
1625 ice_dcf_init_hw(eth_dev, hw);
1626 }
1627
1628 /* Check if reset has been triggered by PF */
1629 static inline bool
1630 ice_dcf_is_reset(struct rte_eth_dev *dev)
1631 {
1632 struct ice_dcf_adapter *ad = dev->data->dev_private;
1633 struct iavf_hw *hw = &ad->real_hw.avf;
1634
1635 return !(IAVF_READ_REG(hw, IAVF_VF_ARQLEN1) &
1636 IAVF_VF_ARQLEN1_ARQENABLE_MASK);
1637 }
1638
1639 static int
1640 ice_dcf_dev_reset(struct rte_eth_dev *dev)
1641 {
1642 struct ice_dcf_adapter *ad = dev->data->dev_private;
1643 struct ice_dcf_hw *hw = &ad->real_hw;
1644 int ret;
1645
1646 if (ice_dcf_is_reset(dev)) {
1647 if (!ad->real_hw.resetting)
1648 ad->real_hw.resetting = true;
1649 PMD_DRV_LOG(ERR, "The DCF has been reset by PF");
1650
1651 /*
1652 * Simply reset hw to trigger an additional DCF enable/disable
1653 * cycle, which helps to work around the issue that the kernel
1654 * driver may not clean up resources during the previous reset.
1655 */
1656 ice_dcf_reset_hw(dev, hw);
1657 }
1658
1659 ret = ice_dcf_dev_uninit(dev);
1660 if (ret)
1661 return ret;
1662
1663 ret = ice_dcf_dev_init(dev);
1664
1665 return ret;
1666 }
1667
1668 static const uint32_t *
1669 ice_dcf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1670 {
1671 static const uint32_t ptypes[] = {
1672 RTE_PTYPE_L2_ETHER,
1673 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1674 RTE_PTYPE_L4_FRAG,
1675 RTE_PTYPE_L4_ICMP,
1676 RTE_PTYPE_L4_NONFRAG,
1677 RTE_PTYPE_L4_SCTP,
1678 RTE_PTYPE_L4_TCP,
1679 RTE_PTYPE_L4_UDP,
1680 RTE_PTYPE_UNKNOWN
1681 };
1682 return ptypes;
1683 }
1684
1685 static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
1686 .dev_start = ice_dcf_dev_start,
1687 .dev_stop = ice_dcf_dev_stop,
1688 .dev_close = ice_dcf_dev_close,
1689 .dev_reset = ice_dcf_dev_reset,
1690 .dev_configure = ice_dcf_dev_configure,
1691 .dev_infos_get = ice_dcf_dev_info_get,
1692 .dev_supported_ptypes_get = ice_dcf_dev_supported_ptypes_get,
1693 .rx_queue_setup = ice_rx_queue_setup,
1694 .tx_queue_setup = ice_tx_queue_setup,
1695 .rx_queue_release = ice_dev_rx_queue_release,
1696 .tx_queue_release = ice_dev_tx_queue_release,
1697 .rx_queue_start = ice_dcf_rx_queue_start,
1698 .tx_queue_start = ice_dcf_tx_queue_start,
1699 .rx_queue_stop = ice_dcf_rx_queue_stop,
1700 .tx_queue_stop = ice_dcf_tx_queue_stop,
1701 .rxq_info_get = ice_rxq_info_get,
1702 .txq_info_get = ice_txq_info_get,
1703 .get_monitor_addr = ice_get_monitor_addr,
1704 .link_update = ice_dcf_link_update,
1705 .stats_get = ice_dcf_stats_get,
1706 .stats_reset = ice_dcf_stats_reset,
1707 .xstats_get = ice_dcf_xstats_get,
1708 .xstats_get_names = ice_dcf_xstats_get_names,
1709 .xstats_reset = ice_dcf_stats_reset,
1710 .promiscuous_enable = ice_dcf_dev_promiscuous_enable,
1711 .promiscuous_disable = ice_dcf_dev_promiscuous_disable,
1712 .allmulticast_enable = ice_dcf_dev_allmulticast_enable,
1713 .allmulticast_disable = ice_dcf_dev_allmulticast_disable,
1714 .mac_addr_add = dcf_dev_add_mac_addr,
1715 .mac_addr_remove = dcf_dev_del_mac_addr,
1716 .set_mc_addr_list = dcf_set_mc_addr_list,
1717 .mac_addr_set = dcf_dev_set_default_mac_addr,
1718 .vlan_filter_set = dcf_dev_vlan_filter_set,
1719 .vlan_offload_set = dcf_dev_vlan_offload_set,
1720 .flow_ops_get = ice_dcf_dev_flow_ops_get,
1721 .udp_tunnel_port_add = ice_dcf_dev_udp_tunnel_port_add,
1722 .udp_tunnel_port_del = ice_dcf_dev_udp_tunnel_port_del,
1723 .tm_ops_get = ice_dcf_tm_ops_get,
1724 .reta_update = ice_dcf_dev_rss_reta_update,
1725 .reta_query = ice_dcf_dev_rss_reta_query,
1726 .rss_hash_update = ice_dcf_dev_rss_hash_update,
1727 .rss_hash_conf_get = ice_dcf_dev_rss_hash_conf_get,
1728 .tx_done_cleanup = ice_tx_done_cleanup,
1729 .mtu_set = ice_dcf_dev_mtu_set,
1730 };
1731
1732 static int
1733 ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
1734 {
1735 struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
1736 struct ice_adapter *parent_adapter = &adapter->parent;
1737
1738 eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
1739 eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
1740 eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
1741
1742 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1743 return 0;
1744
1745 adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
1746 if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
1747 PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
1748 __atomic_store_n(&parent_adapter->dcf_state_on, false,
1749 __ATOMIC_RELAXED);
1750 return -1;
1751 }
1752
1753 __atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);
1754
1755 if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
1756 PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
1757 ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
1758 return -1;
1759 }
1760
1761 dcf_config_promisc(adapter, false, false);
1762 return 0;
1763 }
1764
1765 static int
1766 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
1767 {
1768 ice_dcf_dev_close(eth_dev);
1769
1770 return 0;
1771 }
1772
1773 static int
1774 ice_dcf_cap_check_handler(__rte_unused const char *key,
1775 const char *value, __rte_unused void *opaque)
1776 {
1777 if (strcmp(value, "dcf"))
1778 return -1;
1779
1780 return 0;
1781 }
1782
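/*
 * A port is probed as a DCF only when the "cap=dcf" devarg is present,
 * e.g. (illustrative EAL argument; the BDF below is hypothetical):
 *   -a 0000:18:01.0,cap=dcf
 */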
1783 static int
1784 ice_dcf_cap_selected(struct rte_devargs *devargs)
1785 {
1786 struct rte_kvargs *kvlist;
1787 const char *key = "cap";
1788 int ret = 0;
1789
1790 if (devargs == NULL)
1791 return 0;
1792
1793 kvlist = rte_kvargs_parse(devargs->args, NULL);
1794 if (kvlist == NULL)
1795 return 0;
1796
1797 if (!rte_kvargs_count(kvlist, key))
1798 goto exit;
1799
1800 /* dcf capability selected when there's a key-value pair: cap=dcf */
1801 if (rte_kvargs_process(kvlist, key,
1802 ice_dcf_cap_check_handler, NULL) < 0)
1803 goto exit;
1804
1805 ret = 1;
1806
1807 exit:
1808 rte_kvargs_free(kvlist);
1809 return ret;
1810 }
1811
1812 static int
1813 eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
1814 struct rte_pci_device *pci_dev)
1815 {
1816 struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
1817 struct ice_dcf_vf_repr_param repr_param;
1818 char repr_name[RTE_ETH_NAME_MAX_LEN];
1819 struct ice_dcf_adapter *dcf_adapter;
1820 struct rte_eth_dev *dcf_ethdev;
1821 uint16_t dcf_vsi_id;
1822 int i, ret;
1823
1824 if (!ice_dcf_cap_selected(pci_dev->device.devargs))
1825 return 1;
1826
1827 ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
1828 if (ret)
1829 return ret;
1830
1831 ret = rte_eth_dev_pci_generic_probe(pci_dev,
1832 sizeof(struct ice_dcf_adapter),
1833 ice_dcf_dev_init);
1834 if (ret || !eth_da.nb_representor_ports)
1835 return ret;
1836 if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
1837 return -ENOTSUP;
1838
1839 dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1840 if (dcf_ethdev == NULL)
1841 return -ENODEV;
1842
1843 dcf_adapter = dcf_ethdev->data->dev_private;
1844 ret = ice_dcf_init_repr_info(dcf_adapter);
1845 if (ret)
1846 return ret;
1847
1848 if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
1849 eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
1850 PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
1851 eth_da.nb_representor_ports);
1852 ice_dcf_free_repr_info(dcf_adapter);
1853 return -EINVAL;
1854 }
1855
1856 dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;
1857
1858 repr_param.dcf_eth_dev = dcf_ethdev;
1859 repr_param.switch_domain_id = 0;
1860
1861 for (i = 0; i < eth_da.nb_representor_ports; i++) {
1862 uint16_t vf_id = eth_da.representor_ports[i];
1863 struct rte_eth_dev *vf_rep_eth_dev;
1864
1865 if (vf_id >= dcf_adapter->real_hw.num_vfs) {
1866 PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
1867 vf_id, dcf_adapter->real_hw.num_vfs - 1);
1868 ret = -EINVAL;
1869 break;
1870 }
1871
1872 if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
1873 PMD_DRV_LOG(ERR, "VF ID %u is DCF's ID.\n", vf_id);
1874 ret = -EINVAL;
1875 break;
1876 }
1877
1878 repr_param.vf_id = vf_id;
1879 snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
1880 pci_dev->device.name, vf_id);
1881 ret = rte_eth_dev_create(&pci_dev->device, repr_name,
1882 sizeof(struct ice_dcf_vf_repr),
1883 NULL, NULL, ice_dcf_vf_repr_init,
1884 &repr_param);
1885 if (ret) {
1886 PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",
1887 repr_name);
1888 break;
1889 }
1890
1891 vf_rep_eth_dev = rte_eth_dev_allocated(repr_name);
1892 if (!vf_rep_eth_dev) {
1893 PMD_DRV_LOG(ERR,
1894 "Failed to find the ethdev for DCF VF representor: %s",
1895 repr_name);
1896 ret = -ENODEV;
1897 break;
1898 }
1899
1900 dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev = vf_rep_eth_dev;
1901 dcf_adapter->num_reprs++;
1902 }
1903
1904 return ret;
1905 }
1906
1907 static int
1908 eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
1909 {
1910 struct rte_eth_dev *eth_dev;
1911
1912 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1913 if (!eth_dev)
1914 return 0;
1915
1916 if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
1917 return rte_eth_dev_pci_generic_remove(pci_dev,
1918 ice_dcf_vf_repr_uninit);
1919 else
1920 return rte_eth_dev_pci_generic_remove(pci_dev,
1921 ice_dcf_dev_uninit);
1922 }
1923
1924 static const struct rte_pci_id pci_id_ice_dcf_map[] = {
1925 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
1926 { .vendor_id = 0, /* sentinel */ },
1927 };
1928
1929 static struct rte_pci_driver rte_ice_dcf_pmd = {
1930 .id_table = pci_id_ice_dcf_map,
1931 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1932 .probe = eth_ice_dcf_pci_probe,
1933 .remove = eth_ice_dcf_pci_remove,
1934 };
1935
1936 RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
1937 RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
1938 RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
1939 RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");
1940