xref: /f-stack/dpdk/drivers/net/ice/ice_dcf_ethdev.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4 
5 #include <errno.h>
6 #include <stdbool.h>
7 #include <sys/types.h>
8 #include <sys/ioctl.h>
9 #include <unistd.h>
10 
11 #include <rte_interrupts.h>
12 #include <rte_debug.h>
13 #include <rte_pci.h>
14 #include <rte_atomic.h>
15 #include <rte_eal.h>
16 #include <rte_ether.h>
17 #include <rte_ethdev_pci.h>
18 #include <rte_kvargs.h>
19 #include <rte_malloc.h>
20 #include <rte_memzone.h>
21 #include <rte_dev.h>
22 
23 #include <iavf_devids.h>
24 
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27 #include "ice_rxtx.h"
28 
29 static uint16_t
30 ice_dcf_recv_pkts(__rte_unused void *rx_queue,
31 		  __rte_unused struct rte_mbuf **bufs,
32 		  __rte_unused uint16_t nb_pkts)
33 {
34 	return 0;
35 }
36 
37 static uint16_t
38 ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
39 		  __rte_unused struct rte_mbuf **bufs,
40 		  __rte_unused uint16_t nb_pkts)
41 {
42 	return 0;
43 }
44 
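/* Program the Rx parameters of one queue: derive the buffer and maximum
 * packet length (validated against the jumbo-frame offload), enable
 * scattered Rx when a frame cannot fit into a single buffer, and
 * initialize the queue's Rx tail register.
 */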
45 static int
46 ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
47 {
48 	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
49 	struct rte_eth_dev_data *dev_data = dev->data;
50 	struct iavf_hw *hw = &dcf_ad->real_hw.avf;
51 	uint16_t buf_size, max_pkt_len, len;
52 
53 	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
54 	rxq->rx_hdr_len = 0;
55 	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
56 	len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
57 	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
58 
59 	/* Check if the jumbo frame and maximum packet length are set
60 	 * correctly.
61 	 */
62 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
63 		if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
64 		    max_pkt_len > ICE_FRAME_SIZE_MAX) {
65 			PMD_DRV_LOG(ERR, "maximum packet length must be "
66 				    "larger than %u and smaller than %u, "
67 				    "as jumbo frame is enabled",
68 				    (uint32_t)RTE_ETHER_MAX_LEN,
69 				    (uint32_t)ICE_FRAME_SIZE_MAX);
70 			return -EINVAL;
71 		}
72 	} else {
73 		if (max_pkt_len < RTE_ETHER_MIN_LEN ||
74 		    max_pkt_len > RTE_ETHER_MAX_LEN) {
75 			PMD_DRV_LOG(ERR, "maximum packet length must be "
76 				    "larger than %u and smaller than %u, "
77 				    "as jumbo frame is disabled",
78 				    (uint32_t)RTE_ETHER_MIN_LEN,
79 				    (uint32_t)RTE_ETHER_MAX_LEN);
80 			return -EINVAL;
81 		}
82 	}
83 
84 	rxq->max_pkt_len = max_pkt_len;
85 	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
86 	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
87 		dev_data->scattered_rx = 1;
88 	}
89 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
90 	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
91 	IAVF_WRITE_FLUSH(hw);
92 
93 	return 0;
94 }
95 
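/* Initialize every Rx queue that has been set up and select the
 * Rx/Tx burst functions for the device.
 */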
96 static int
97 ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
98 {
99 	struct ice_rx_queue **rxq =
100 		(struct ice_rx_queue **)dev->data->rx_queues;
101 	int i, ret;
102 
103 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
104 		if (!rxq[i] || !rxq[i]->q_set)
105 			continue;
106 		ret = ice_dcf_init_rxq(dev, rxq[i]);
107 		if (ret)
108 			return ret;
109 	}
110 
111 	ice_set_rx_function(dev);
112 	ice_set_tx_function(dev);
113 
114 	return 0;
115 }
116 
117 #define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
118 #define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
119 
120 #define IAVF_ITR_INDEX_DEFAULT          0
121 #define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
122 #define IAVF_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
123 
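/* Convert an ITR interval in microseconds into the hardware count
 * (2 us granularity), falling back to the default when out of range.
 */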
124 static inline uint16_t
125 iavf_calc_itr_interval(int16_t interval)
126 {
127 	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
128 		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
129 
130 	/* Convert to hardware count; each unit written represents 2 us */
131 	return interval / 2;
132 }
133 
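/* Set up the MSI-X vectors used for Rx. When Rx interrupts are disabled,
 * all queues share a single vector used only for descriptor write back
 * (WB_ON_ITR when supported, otherwise an interrupt with maximum ITR).
 * Otherwise the queues are spread over the available vectors, and the
 * resulting queue/vector map is pushed to the PF via ice_dcf_config_irq_map().
 */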
134 static int
135 ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
136 				     struct rte_intr_handle *intr_handle)
137 {
138 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
139 	struct ice_dcf_hw *hw = &adapter->real_hw;
140 	uint16_t interval, i;
141 	int vec;
142 
143 	if (rte_intr_cap_multiple(intr_handle) &&
144 	    dev->data->dev_conf.intr_conf.rxq) {
145 		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
146 			return -1;
147 	}
148 
149 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
150 		intr_handle->intr_vec =
151 			rte_zmalloc("intr_vec",
152 				    dev->data->nb_rx_queues * sizeof(int), 0);
153 		if (!intr_handle->intr_vec) {
154 			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
155 				    dev->data->nb_rx_queues);
156 			return -1;
157 		}
158 	}
159 
160 	if (!dev->data->dev_conf.intr_conf.rxq ||
161 	    !rte_intr_dp_is_en(intr_handle)) {
162 		/* Rx interrupt disabled, map an interrupt only for write back */
163 		hw->nb_msix = 1;
164 		if (hw->vf_res->vf_cap_flags &
165 		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
166 			/* If WB_ON_ITR is supported, enable it */
167 			hw->msix_base = IAVF_RX_VEC_START;
168 			IAVF_WRITE_REG(&hw->avf,
169 				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
170 				       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
171 				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
172 		} else {
173 			/* Without the WB_ON_ITR offload flag, an interrupt
174 			 * is needed for descriptor write back.
175 			 */
176 			hw->msix_base = IAVF_MISC_VEC_ID;
177 
178 			/* set ITR to max */
179 			interval =
180 			iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
181 			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
182 				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
183 				       (IAVF_ITR_INDEX_DEFAULT <<
184 					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
185 				       (interval <<
186 					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
187 		}
188 		IAVF_WRITE_FLUSH(&hw->avf);
189 		/* map all queues to the same interrupt */
190 		for (i = 0; i < dev->data->nb_rx_queues; i++)
191 			hw->rxq_map[hw->msix_base] |= 1 << i;
192 	} else {
193 		if (!rte_intr_allow_others(intr_handle)) {
194 			hw->nb_msix = 1;
195 			hw->msix_base = IAVF_MISC_VEC_ID;
196 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
197 				hw->rxq_map[hw->msix_base] |= 1 << i;
198 				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
199 			}
200 			PMD_DRV_LOG(DEBUG,
201 				    "vector %u is mapped to all Rx queues",
202 				    hw->msix_base);
203 		} else {
204 			/* If Rx interrupt is required and multiple interrupts
205 			 * can be used, the vectors start from 1.
206 			 */
207 			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
208 					      intr_handle->nb_efd);
209 			hw->msix_base = IAVF_MISC_VEC_ID;
210 			vec = IAVF_MISC_VEC_ID;
211 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
212 				hw->rxq_map[vec] |= 1 << i;
213 				intr_handle->intr_vec[i] = vec++;
214 				if (vec >= hw->nb_msix)
215 					vec = IAVF_RX_VEC_START;
216 			}
217 			PMD_DRV_LOG(DEBUG,
218 				    "%u vectors are mapped to %u Rx queues",
219 				    hw->nb_msix, dev->data->nb_rx_queues);
220 		}
221 	}
222 
223 	if (ice_dcf_config_irq_map(hw)) {
224 		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
225 		return -1;
226 	}
227 	return 0;
228 }
229 
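/* Allocate one mbuf per descriptor and fill the Rx ring with their
 * DMA addresses so the queue can be started.
 */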
230 static int
231 alloc_rxq_mbufs(struct ice_rx_queue *rxq)
232 {
233 	volatile union ice_rx_flex_desc *rxd;
234 	struct rte_mbuf *mbuf = NULL;
235 	uint64_t dma_addr;
236 	uint16_t i;
237 
238 	for (i = 0; i < rxq->nb_rx_desc; i++) {
239 		mbuf = rte_mbuf_raw_alloc(rxq->mp);
240 		if (unlikely(!mbuf)) {
241 			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
242 			return -ENOMEM;
243 		}
244 
245 		rte_mbuf_refcnt_set(mbuf, 1);
246 		mbuf->next = NULL;
247 		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
248 		mbuf->nb_segs = 1;
249 		mbuf->port = rxq->port_id;
250 
251 		dma_addr =
252 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
253 
254 		rxd = &rxq->rx_ring[i];
255 		rxd->read.pkt_addr = dma_addr;
256 		rxd->read.hdr_addr = 0;
257 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
258 		rxd->read.rsvd1 = 0;
259 		rxd->read.rsvd2 = 0;
260 #endif
261 
262 		rxq->sw_ring[i].mbuf = (void *)mbuf;
263 	}
264 
265 	return 0;
266 }
267 
268 static int
269 ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
270 {
271 	struct ice_dcf_adapter *ad = dev->data->dev_private;
272 	struct iavf_hw *hw = &ad->real_hw.avf;
273 	struct ice_rx_queue *rxq;
274 	int err = 0;
275 
276 	if (rx_queue_id >= dev->data->nb_rx_queues)
277 		return -EINVAL;
278 
279 	rxq = dev->data->rx_queues[rx_queue_id];
280 
281 	err = alloc_rxq_mbufs(rxq);
282 	if (err) {
283 		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
284 		return err;
285 	}
286 
287 	rte_wmb();
288 
289 	/* Init the RX tail register. */
290 	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
291 	IAVF_WRITE_FLUSH(hw);
292 
293 	/* Ready to switch the queue on */
294 	err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
295 	if (err) {
296 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
297 			    rx_queue_id);
298 		return err;
299 	}
300 
301 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
302 
303 	return 0;
304 }
305 
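/* Zero the Rx descriptor ring, point the spare sw_ring entries at the
 * fake mbuf and reset the software state of the queue.
 */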
306 static inline void
307 reset_rx_queue(struct ice_rx_queue *rxq)
308 {
309 	uint16_t len;
310 	uint32_t i;
311 
312 	if (!rxq)
313 		return;
314 
315 	len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;
316 
317 	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
318 		((volatile char *)rxq->rx_ring)[i] = 0;
319 
320 	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
321 
322 	for (i = 0; i < ICE_RX_MAX_BURST; i++)
323 		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
324 
325 	/* for rx bulk */
326 	rxq->rx_nb_avail = 0;
327 	rxq->rx_next_avail = 0;
328 	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
329 
330 	rxq->rx_tail = 0;
331 	rxq->nb_rx_hold = 0;
332 	rxq->pkt_first_seg = NULL;
333 	rxq->pkt_last_seg = NULL;
334 }
335 
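/* Mark every Tx descriptor as done, rebuild the sw_ring linkage and
 * reset the software state of the queue.
 */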
336 static inline void
337 reset_tx_queue(struct ice_tx_queue *txq)
338 {
339 	struct ice_tx_entry *txe;
340 	uint32_t i, size;
341 	uint16_t prev;
342 
343 	if (!txq) {
344 		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
345 		return;
346 	}
347 
348 	txe = txq->sw_ring;
349 	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
350 	for (i = 0; i < size; i++)
351 		((volatile char *)txq->tx_ring)[i] = 0;
352 
353 	prev = (uint16_t)(txq->nb_tx_desc - 1);
354 	for (i = 0; i < txq->nb_tx_desc; i++) {
355 		txq->tx_ring[i].cmd_type_offset_bsz =
356 			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
357 		txe[i].mbuf = NULL;
358 		txe[i].last_id = i;
359 		txe[prev].next_id = i;
360 		prev = i;
361 	}
362 
363 	txq->tx_tail = 0;
364 	txq->nb_tx_used = 0;
365 
366 	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
367 	txq->nb_tx_free = txq->nb_tx_desc - 1;
368 
369 	txq->tx_next_dd = txq->tx_rs_thresh - 1;
370 	txq->tx_next_rs = txq->tx_rs_thresh - 1;
371 }
372 
373 static int
374 ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
375 {
376 	struct ice_dcf_adapter *ad = dev->data->dev_private;
377 	struct ice_dcf_hw *hw = &ad->real_hw;
378 	struct ice_rx_queue *rxq;
379 	int err;
380 
381 	if (rx_queue_id >= dev->data->nb_rx_queues)
382 		return -EINVAL;
383 
384 	err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
385 	if (err) {
386 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
387 			    rx_queue_id);
388 		return err;
389 	}
390 
391 	rxq = dev->data->rx_queues[rx_queue_id];
392 	rxq->rx_rel_mbufs(rxq);
393 	reset_rx_queue(rxq);
394 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
395 
396 	return 0;
397 }
398 
399 static int
400 ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
401 {
402 	struct ice_dcf_adapter *ad = dev->data->dev_private;
403 	struct iavf_hw *hw = &ad->real_hw.avf;
404 	struct ice_tx_queue *txq;
405 	int err = 0;
406 
407 	if (tx_queue_id >= dev->data->nb_tx_queues)
408 		return -EINVAL;
409 
410 	txq = dev->data->tx_queues[tx_queue_id];
411 
412 	/* Init the TX tail register. */
413 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
414 	IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
415 	IAVF_WRITE_FLUSH(hw);
416 
417 	/* Ready to switch the queue on */
418 	err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
419 
420 	if (err) {
421 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
422 			    tx_queue_id);
423 		return err;
424 	}
425 
426 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
427 
428 	return 0;
429 }
430 
431 static int
432 ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
433 {
434 	struct ice_dcf_adapter *ad = dev->data->dev_private;
435 	struct ice_dcf_hw *hw = &ad->real_hw;
436 	struct ice_tx_queue *txq;
437 	int err;
438 
439 	if (tx_queue_id >= dev->data->nb_tx_queues)
440 		return -EINVAL;
441 
442 	err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
443 	if (err) {
444 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
445 			    tx_queue_id);
446 		return err;
447 	}
448 
449 	txq = dev->data->tx_queues[tx_queue_id];
450 	txq->tx_rel_mbufs(txq);
451 	reset_tx_queue(txq);
452 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
453 
454 	return 0;
455 }
456 
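/* Start all Tx queues and then all Rx queues that are not marked for
 * deferred start; on failure roll back the queues already started.
 */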
457 static int
458 ice_dcf_start_queues(struct rte_eth_dev *dev)
459 {
460 	struct ice_rx_queue *rxq;
461 	struct ice_tx_queue *txq;
462 	int nb_rxq = 0;
463 	int nb_txq, i;
464 
465 	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
466 		txq = dev->data->tx_queues[nb_txq];
467 		if (txq->tx_deferred_start)
468 			continue;
469 		if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
470 			PMD_DRV_LOG(ERR, "Failed to start TX queue %u", nb_txq);
471 			goto tx_err;
472 		}
473 	}
474 
475 	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
476 		rxq = dev->data->rx_queues[nb_rxq];
477 		if (rxq->rx_deferred_start)
478 			continue;
479 		if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
480 			PMD_DRV_LOG(ERR, "Failed to start RX queue %u", nb_rxq);
481 			goto rx_err;
482 		}
483 	}
484 
485 	return 0;
486 
487 	/* Stop the already-started queues if not all queues could be started */
488 rx_err:
489 	for (i = 0; i < nb_rxq; i++)
490 		ice_dcf_rx_queue_stop(dev, i);
491 tx_err:
492 	for (i = 0; i < nb_txq; i++)
493 		ice_dcf_tx_queue_stop(dev, i);
494 
495 	return -1;
496 }
497 
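/* dev_start: initialize the Rx queues, set up RSS when the RSS_PF
 * capability is offered, configure the queues and their interrupts
 * through virtchnl, start the queues and install the MAC addresses
 * before reporting link up.
 */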
498 static int
499 ice_dcf_dev_start(struct rte_eth_dev *dev)
500 {
501 	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
502 	struct rte_intr_handle *intr_handle = dev->intr_handle;
503 	struct ice_adapter *ad = &dcf_ad->parent;
504 	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
505 	int ret;
506 
507 	ad->pf.adapter_stopped = 0;
508 
509 	hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
510 				      dev->data->nb_tx_queues);
511 
512 	ret = ice_dcf_init_rx_queues(dev);
513 	if (ret) {
514 		PMD_DRV_LOG(ERR, "Failed to init queues");
515 		return ret;
516 	}
517 
518 	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
519 		ret = ice_dcf_init_rss(hw);
520 		if (ret) {
521 			PMD_DRV_LOG(ERR, "Failed to configure RSS");
522 			return ret;
523 		}
524 	}
525 
526 	ret = ice_dcf_configure_queues(hw);
527 	if (ret) {
528 		PMD_DRV_LOG(ERR, "Failed to configure queues");
529 		return ret;
530 	}
531 
532 	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
533 	if (ret) {
534 		PMD_DRV_LOG(ERR, "Failed to configure Rx queue IRQs");
535 		return ret;
536 	}
537 
538 	if (dev->data->dev_conf.intr_conf.rxq != 0) {
539 		rte_intr_disable(intr_handle);
540 		rte_intr_enable(intr_handle);
541 	}
542 
543 	ret = ice_dcf_start_queues(dev);
544 	if (ret) {
545 		PMD_DRV_LOG(ERR, "Failed to enable queues");
546 		return ret;
547 	}
548 
549 	ret = ice_dcf_add_del_all_mac_addr(hw, true);
550 	if (ret) {
551 		PMD_DRV_LOG(ERR, "Failed to add mac addr");
552 		return ret;
553 	}
554 
555 	dev->data->dev_link.link_status = ETH_LINK_UP;
556 
557 	return 0;
558 }
559 
560 static void
561 ice_dcf_stop_queues(struct rte_eth_dev *dev)
562 {
563 	struct ice_dcf_adapter *ad = dev->data->dev_private;
564 	struct ice_dcf_hw *hw = &ad->real_hw;
565 	struct ice_rx_queue *rxq;
566 	struct ice_tx_queue *txq;
567 	int ret, i;
568 
569 	/* Stop all queues */
570 	ret = ice_dcf_disable_queues(hw);
571 	if (ret)
572 		PMD_DRV_LOG(WARNING, "Failed to stop queues");
573 
574 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
575 		txq = dev->data->tx_queues[i];
576 		if (!txq)
577 			continue;
578 		txq->tx_rel_mbufs(txq);
579 		reset_tx_queue(txq);
580 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
581 	}
582 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
583 		rxq = dev->data->rx_queues[i];
584 		if (!rxq)
585 			continue;
586 		rxq->rx_rel_mbufs(rxq);
587 		reset_rx_queue(rxq);
588 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
589 	}
590 }
591 
592 static int
593 ice_dcf_dev_stop(struct rte_eth_dev *dev)
594 {
595 	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
596 	struct rte_intr_handle *intr_handle = dev->intr_handle;
597 	struct ice_adapter *ad = &dcf_ad->parent;
598 
599 	if (ad->pf.adapter_stopped == 1) {
600 		PMD_DRV_LOG(DEBUG, "Port is already stopped");
601 		return 0;
602 	}
603 
604 	ice_dcf_stop_queues(dev);
605 
606 	rte_intr_efd_disable(intr_handle);
607 	if (intr_handle->intr_vec) {
608 		rte_free(intr_handle->intr_vec);
609 		intr_handle->intr_vec = NULL;
610 	}
611 
612 	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
613 	dev->data->dev_link.link_status = ETH_LINK_DOWN;
614 	ad->pf.adapter_stopped = 1;
615 
616 	return 0;
617 }
618 
619 static int
620 ice_dcf_dev_configure(struct rte_eth_dev *dev)
621 {
622 	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
623 	struct ice_adapter *ad = &dcf_ad->parent;
624 
625 	ad->rx_bulk_alloc_allowed = true;
626 	ad->tx_simple_allowed = true;
627 
628 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
629 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
630 
631 	return 0;
632 }
633 
634 static int
635 ice_dcf_dev_info_get(struct rte_eth_dev *dev,
636 		     struct rte_eth_dev_info *dev_info)
637 {
638 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
639 	struct ice_dcf_hw *hw = &adapter->real_hw;
640 
641 	dev_info->max_mac_addrs = 1;
642 	dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
643 	dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
644 	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
645 	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
646 	dev_info->hash_key_size = hw->vf_res->rss_key_size;
647 	dev_info->reta_size = hw->vf_res->rss_lut_size;
648 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
649 
650 	dev_info->rx_offload_capa =
651 		DEV_RX_OFFLOAD_VLAN_STRIP |
652 		DEV_RX_OFFLOAD_IPV4_CKSUM |
653 		DEV_RX_OFFLOAD_UDP_CKSUM |
654 		DEV_RX_OFFLOAD_TCP_CKSUM |
655 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
656 		DEV_RX_OFFLOAD_SCATTER |
657 		DEV_RX_OFFLOAD_JUMBO_FRAME |
658 		DEV_RX_OFFLOAD_VLAN_FILTER |
659 		DEV_RX_OFFLOAD_RSS_HASH;
660 	dev_info->tx_offload_capa =
661 		DEV_TX_OFFLOAD_VLAN_INSERT |
662 		DEV_TX_OFFLOAD_IPV4_CKSUM |
663 		DEV_TX_OFFLOAD_UDP_CKSUM |
664 		DEV_TX_OFFLOAD_TCP_CKSUM |
665 		DEV_TX_OFFLOAD_SCTP_CKSUM |
666 		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
667 		DEV_TX_OFFLOAD_TCP_TSO |
668 		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
669 		DEV_TX_OFFLOAD_GRE_TNL_TSO |
670 		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
671 		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
672 		DEV_TX_OFFLOAD_MULTI_SEGS;
673 
674 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
675 		.rx_thresh = {
676 			.pthresh = ICE_DEFAULT_RX_PTHRESH,
677 			.hthresh = ICE_DEFAULT_RX_HTHRESH,
678 			.wthresh = ICE_DEFAULT_RX_WTHRESH,
679 		},
680 		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
681 		.rx_drop_en = 0,
682 		.offloads = 0,
683 	};
684 
685 	dev_info->default_txconf = (struct rte_eth_txconf) {
686 		.tx_thresh = {
687 			.pthresh = ICE_DEFAULT_TX_PTHRESH,
688 			.hthresh = ICE_DEFAULT_TX_HTHRESH,
689 			.wthresh = ICE_DEFAULT_TX_WTHRESH,
690 		},
691 		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
692 		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
693 		.offloads = 0,
694 	};
695 
696 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
697 		.nb_max = ICE_MAX_RING_DESC,
698 		.nb_min = ICE_MIN_RING_DESC,
699 		.nb_align = ICE_ALIGN_RING_DESC,
700 	};
701 
702 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
703 		.nb_max = ICE_MAX_RING_DESC,
704 		.nb_min = ICE_MIN_RING_DESC,
705 		.nb_align = ICE_ALIGN_RING_DESC,
706 	};
707 
708 	return 0;
709 }
710 
711 static int
712 ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
713 {
714 	return 0;
715 }
716 
717 static int
718 ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
719 {
720 	return 0;
721 }
722 
723 static int
724 ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
725 {
726 	return 0;
727 }
728 
729 static int
730 ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
731 {
732 	return 0;
733 }
734 
735 static int
736 ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
737 			enum rte_filter_type filter_type,
738 			enum rte_filter_op filter_op,
739 			void *arg)
740 {
741 	int ret = 0;
742 
743 	if (!dev)
744 		return -EINVAL;
745 
746 	switch (filter_type) {
747 	case RTE_ETH_FILTER_GENERIC:
748 		if (filter_op != RTE_ETH_FILTER_GET)
749 			return -EINVAL;
750 		*(const void **)arg = &ice_flow_ops;
751 		break;
752 
753 	default:
754 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
755 			    filter_type);
756 		ret = -EINVAL;
757 		break;
758 	}
759 
760 	return ret;
761 }
762 
763 #define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
764 #define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
765 #define ICE_DCF_48_BIT_MASK  RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)
766 
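/* Hardware statistics are free-running counters; compute the delta from
 * the recorded offset, handling 48-bit wrap-around.
 */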
767 static void
768 ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
769 {
770 	if (*stat >= *offset)
771 		*stat = *stat - *offset;
772 	else
773 		*stat = (uint64_t)((*stat +
774 			((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);
775 
776 	*stat &= ICE_DCF_48_BIT_MASK;
777 }
778 
779 static void
780 ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
781 {
782 	if (*stat >= *offset)
783 		*stat = (uint64_t)(*stat - *offset);
784 	else
785 		*stat = (uint64_t)((*stat +
786 			((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
787 }
788 
789 static void
790 ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
791 		     struct virtchnl_eth_stats *nes)
792 {
793 	ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
794 	ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
795 	ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
796 	ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
797 	ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
798 	ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
799 	ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
800 	ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
801 	ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
802 	ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
803 	ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
804 }
805 
806 
807 static int
808 ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
809 {
810 	struct ice_dcf_adapter *ad = dev->data->dev_private;
811 	struct ice_dcf_hw *hw = &ad->real_hw;
812 	struct virtchnl_eth_stats pstats;
813 	int ret;
814 
815 	ret = ice_dcf_query_stats(hw, &pstats);
816 	if (ret == 0) {
817 		ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
818 		stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
819 				pstats.rx_broadcast - pstats.rx_discards;
820 		stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
821 						pstats.tx_unicast;
822 		stats->imissed = pstats.rx_discards;
823 		stats->oerrors = pstats.tx_errors + pstats.tx_discards;
824 		stats->ibytes = pstats.rx_bytes;
825 		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
826 		stats->obytes = pstats.tx_bytes;
827 	} else {
828 		PMD_DRV_LOG(ERR, "Get statistics failed");
829 	}
830 	return ret;
831 }
832 
833 static int
834 ice_dcf_stats_reset(struct rte_eth_dev *dev)
835 {
836 	struct ice_dcf_adapter *ad = dev->data->dev_private;
837 	struct ice_dcf_hw *hw = &ad->real_hw;
838 	struct virtchnl_eth_stats pstats;
839 	int ret;
840 
841 	/* read stat values to clear hardware registers */
842 	ret = ice_dcf_query_stats(hw, &pstats);
843 	if (ret != 0)
844 		return ret;
845 
846 	/* set the stats offset based on current values */
847 	hw->eth_stats_offset = pstats;
848 
849 	return 0;
850 }
851 
852 static int
853 ice_dcf_dev_close(struct rte_eth_dev *dev)
854 {
855 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
856 
857 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
858 		return 0;
859 
860 	ice_dcf_uninit_parent_adapter(dev);
861 	ice_dcf_uninit_hw(dev, &adapter->real_hw);
862 
863 	return 0;
864 }
865 
866 static int
867 ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
868 		    __rte_unused int wait_to_complete)
869 {
870 	return 0;
871 }
872 
873 static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
874 	.dev_start               = ice_dcf_dev_start,
875 	.dev_stop                = ice_dcf_dev_stop,
876 	.dev_close               = ice_dcf_dev_close,
877 	.dev_configure           = ice_dcf_dev_configure,
878 	.dev_infos_get           = ice_dcf_dev_info_get,
879 	.rx_queue_setup          = ice_rx_queue_setup,
880 	.tx_queue_setup          = ice_tx_queue_setup,
881 	.rx_queue_release        = ice_rx_queue_release,
882 	.tx_queue_release        = ice_tx_queue_release,
883 	.rx_queue_start          = ice_dcf_rx_queue_start,
884 	.tx_queue_start          = ice_dcf_tx_queue_start,
885 	.rx_queue_stop           = ice_dcf_rx_queue_stop,
886 	.tx_queue_stop           = ice_dcf_tx_queue_stop,
887 	.link_update             = ice_dcf_link_update,
888 	.stats_get               = ice_dcf_stats_get,
889 	.stats_reset             = ice_dcf_stats_reset,
890 	.promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
891 	.promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
892 	.allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
893 	.allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
894 	.filter_ctrl             = ice_dcf_dev_filter_ctrl,
895 };
896 
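/* ethdev init: hook up the DCF dev_ops and the placeholder burst
 * functions, then bring up the DCF hardware and its parent adapter
 * (primary process only).
 */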
897 static int
898 ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
899 {
900 	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
901 
902 	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
903 	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
904 	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
905 
906 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
907 		return 0;
908 
909 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
910 
911 	adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
912 	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
913 		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
914 		return -1;
915 	}
916 
917 	if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
918 		PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
919 		ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
920 		return -1;
921 	}
922 
923 	return 0;
924 }
925 
926 static int
927 ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
928 {
929 	ice_dcf_dev_close(eth_dev);
930 
931 	return 0;
932 }
933 
934 static int
935 ice_dcf_cap_check_handler(__rte_unused const char *key,
936 			  const char *value, __rte_unused void *opaque)
937 {
938 	if (strcmp(value, "dcf"))
939 		return -1;
940 
941 	return 0;
942 }
943 
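/* Return 1 only when the device was probed with the devargs "cap=dcf",
 * so this PMD claims the VF only when DCF mode is explicitly requested.
 */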
944 static int
945 ice_dcf_cap_selected(struct rte_devargs *devargs)
946 {
947 	struct rte_kvargs *kvlist;
948 	const char *key = "cap";
949 	int ret = 0;
950 
951 	if (devargs == NULL)
952 		return 0;
953 
954 	kvlist = rte_kvargs_parse(devargs->args, NULL);
955 	if (kvlist == NULL)
956 		return 0;
957 
958 	if (!rte_kvargs_count(kvlist, key))
959 		goto exit;
960 
961 	/* dcf capability selected when there's a key-value pair: cap=dcf */
962 	if (rte_kvargs_process(kvlist, key,
963 			       ice_dcf_cap_check_handler, NULL) < 0)
964 		goto exit;
965 
966 	ret = 1;
967 
968 exit:
969 	rte_kvargs_free(kvlist);
970 	return ret;
971 }
972 
973 static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
974 			     struct rte_pci_device *pci_dev)
975 {
976 	if (!ice_dcf_cap_selected(pci_dev->device.devargs))
977 		return 1;
978 
979 	return rte_eth_dev_pci_generic_probe(pci_dev,
980 					     sizeof(struct ice_dcf_adapter),
981 					     ice_dcf_dev_init);
982 }
983 
984 static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
985 {
986 	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit);
987 }
988 
989 static const struct rte_pci_id pci_id_ice_dcf_map[] = {
990 	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
991 	{ .vendor_id = 0, /* sentinel */ },
992 };
993 
994 static struct rte_pci_driver rte_ice_dcf_pmd = {
995 	.id_table = pci_id_ice_dcf_map,
996 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
997 	.probe = eth_ice_dcf_pci_probe,
998 	.remove = eth_ice_dcf_pci_remove,
999 };
1000 
1001 RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
1002 RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
1003 RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
1004 RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");
1005