/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include <iavf_devids.h>

#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"

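/*
 * Dummy Rx/Tx burst handlers. They are installed as the initial
 * rx_pkt_burst/tx_pkt_burst callbacks and report that no packets were
 * received or sent; the real burst functions are selected later via
 * ice_set_rx_function()/ice_set_tx_function() when the queues are set up.
 */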
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}

static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}

static int
ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_eth_dev_data *dev_data = dev->data;
	struct iavf_hw *hw = &dcf_ad->real_hw.avf;
	uint16_t buf_size, max_pkt_len, len;

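	/*
	 * Derive the Rx buffer length from the mempool data room minus the
	 * headroom. A received frame may span up to ICE_SUPPORT_CHAIN_NUM
	 * chained buffers, so the effective maximum packet length is the
	 * smaller of that limit and the configured max_rx_pkt_len.
	 */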
	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_hdr_len = 0;
	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
	len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

	/* Check if the jumbo frame and maximum packet length are set
	 * correctly.
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
		    max_pkt_len > ICE_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is enabled",
				    (uint32_t)RTE_ETHER_MAX_LEN,
				    (uint32_t)ICE_FRAME_SIZE_MAX);
			return -EINVAL;
		}
	} else {
		if (max_pkt_len < RTE_ETHER_MIN_LEN ||
		    max_pkt_len > RTE_ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
				    (uint32_t)RTE_ETHER_MIN_LEN,
				    (uint32_t)RTE_ETHER_MAX_LEN);
			return -EINVAL;
		}
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}
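	/*
	 * Program the queue's tail register with the index of the last
	 * descriptor: the whole ring is handed to hardware as free Rx
	 * descriptors before the queue is started.
	 */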
	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	return 0;
}

static int
ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue **rxq =
		(struct ice_rx_queue **)dev->data->rx_queues;
	int i, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = ice_dcf_init_rxq(dev, rxq[i]);
		if (ret)
			return ret;
	}

	ice_set_rx_function(dev);
	ice_set_tx_function(dev);

	return 0;
}

#define IAVF_MISC_VEC_ID			RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START			RTE_INTR_VEC_RXTX_OFFSET

#define IAVF_ITR_INDEX_DEFAULT			0
#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT		32 /* 32 us */
#define IAVF_QUEUE_ITR_INTERVAL_MAX		8160 /* 8160 us */
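/*
 * Vector 0 (IAVF_MISC_VEC_ID) is the miscellaneous/control vector; when
 * per-queue Rx interrupts are enabled, queue vectors are assigned starting
 * from IAVF_RX_VEC_START. The ITR interval registers are programmed in
 * units of 2 us, which is why iavf_calc_itr_interval() halves the
 * microsecond value.
 */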

static inline uint16_t
iavf_calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to hardware count; each unit written represents 2 us */
	return interval / 2;
}

static int
ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
			      struct rte_intr_handle *intr_handle)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint16_t interval, i;
	int vec;

	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
				    dev->data->nb_rx_queues);
			return -1;
		}
	}

	if (!dev->data->dev_conf.intr_conf.rxq ||
	    !rte_intr_dp_is_en(intr_handle)) {
		/* Rx interrupt disabled, map interrupt only for writeback */
		hw->nb_msix = 1;
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR is supported, enable it */
			hw->msix_base = IAVF_RX_VEC_START;
			IAVF_WRITE_REG(&hw->avf,
				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
				       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
		} else {
			/* If the WB_ON_ITR offload flag is absent, an
			 * interrupt must be set up for descriptor write
			 * back.
			 */
			hw->msix_base = IAVF_MISC_VEC_ID;

			/* set ITR to max */
			interval =
			iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       (IAVF_ITR_INDEX_DEFAULT <<
					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
				       (interval <<
					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
		}
		IAVF_WRITE_FLUSH(&hw->avf);
		/* map all queues to the same interrupt */
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			hw->rxq_map[hw->msix_base] |= 1 << i;
	} else {
		if (!rte_intr_allow_others(intr_handle)) {
			hw->nb_msix = 1;
			hw->msix_base = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[hw->msix_base] |= 1 << i;
				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
			}
			PMD_DRV_LOG(DEBUG,
				    "vector %u is mapped to all Rx queues",
				    hw->msix_base);
		} else {
			/* If Rx interrupts are required and multiple
			 * interrupt vectors are available, queue vectors
			 * are assigned starting from 1.
			 */
			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
					      intr_handle->nb_efd);
			hw->msix_base = IAVF_MISC_VEC_ID;
			vec = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[vec] |= 1 << i;
				intr_handle->intr_vec[i] = vec++;
				if (vec >= hw->nb_msix)
					vec = IAVF_RX_VEC_START;
			}
			PMD_DRV_LOG(DEBUG,
				    "%u vectors are mapped to %u Rx queues",
				    hw->nb_msix, dev->data->nb_rx_queues);
		}
	}

	if (ice_dcf_config_irq_map(hw)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		return -1;
	}
	return 0;
}

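/*
 * Allocate one mbuf per Rx descriptor and post its I/O address to the
 * ring, so the queue is started with a fully populated free list.
 */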
static int
alloc_rxq_mbufs(struct ice_rx_queue *rxq)
{
	volatile union ice_rx_flex_desc *rxd;
	struct rte_mbuf *mbuf = NULL;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!mbuf)) {
			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		rxd = &rxq->rx_ring[i];
		rxd->read.pkt_addr = dma_addr;
		rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		rxd->read.rsvd1 = 0;
		rxd->read.rsvd2 = 0;
#endif

		rxq->sw_ring[i].mbuf = (void *)mbuf;
	}

	return 0;
}

static int
ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;
	struct ice_rx_queue *rxq;
	int err = 0;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];

	err = alloc_rxq_mbufs(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
		return err;
	}

	rte_wmb();

	/* Init the RX tail register. */
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
			    rx_queue_id);
		return err;
	}

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static inline void
reset_rx_queue(struct ice_rx_queue *rxq)
{
	uint16_t len;
	uint32_t i;

	if (!rxq)
		return;

	len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;

	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

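	/* Point the sw_ring slots past the real descriptors at the fake
	 * mbuf sentinel, so the bulk-allocation Rx path never dereferences
	 * a NULL entry when it reads ahead of the ring end.
	 */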
	for (i = 0; i < ICE_RX_MAX_BURST; i++)
		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;

	/* reset state used by the bulk-allocation Rx path */
	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}

static inline void
reset_tx_queue(struct ice_tx_queue *txq)
{
	struct ice_tx_entry *txe;
	uint32_t i, size;
	uint16_t prev;

	if (!txq) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
		return;
	}

	txe = txq->sw_ring;
	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
	for (i = 0; i < size; i++)
		((volatile char *)txq->tx_ring)[i] = 0;

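	/* Mark every descriptor as done and link the software ring entries
	 * into a circular list: each entry's next_id points at the entry
	 * that follows it, wrapping at the end of the ring.
	 */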
	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i].cmd_type_offset_bsz =
			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;

	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
	txq->nb_tx_free = txq->nb_tx_desc - 1;

	txq->tx_next_dd = txq->tx_rs_thresh - 1;
	txq->tx_next_rs = txq->tx_rs_thresh - 1;
}

static int
ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_rx_queue *rxq;
	int err;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
			    rx_queue_id);
		return err;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	rxq->rx_rel_mbufs(rxq);
	reset_rx_queue(rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct iavf_hw *hw = &ad->real_hw.avf;
	struct ice_tx_queue *txq;
	int err = 0;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	txq = dev->data->tx_queues[tx_queue_id];

	/* Init the TX tail register. */
	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
	IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);

	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
			    tx_queue_id);
		return err;
	}

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_tx_queue *txq;
	int err;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
			    tx_queue_id);
		return err;
	}

	txq = dev->data->tx_queues[tx_queue_id];
	txq->tx_rel_mbufs(txq);
	reset_tx_queue(txq);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
ice_dcf_start_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int nb_rxq = 0;
	int nb_txq, i;

	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
		txq = dev->data->tx_queues[nb_txq];
		if (txq->tx_deferred_start)
			continue;
		if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_txq);
			goto tx_err;
		}
	}

	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
		rxq = dev->data->rx_queues[nb_rxq];
		if (rxq->rx_deferred_start)
			continue;
		if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_rxq);
			goto rx_err;
		}
	}

	return 0;

/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_dcf_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_dcf_tx_queue_stop(dev, i);

	return -1;
}

static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;
	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
	int ret;

	ad->pf.adapter_stopped = 0;

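	/* A virtchnl queue pair covers one Rx and one Tx queue, so request
	 * enough pairs to cover the larger of the two counts.
	 */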
	hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	ret = ice_dcf_init_rx_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to init queues");
		return ret;
	}

	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		ret = ice_dcf_init_rss(hw);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to configure RSS");
			return ret;
		}
	}

	ret = ice_dcf_configure_queues(hw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to config queues");
		return ret;
	}

	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
		return ret;
	}

	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		rte_intr_disable(intr_handle);
		rte_intr_enable(intr_handle);
	}

	ret = ice_dcf_start_queues(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable queues");
		return ret;
	}

	ret = ice_dcf_add_del_all_mac_addr(hw, true);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add mac addr");
		return ret;
	}

	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

static void
ice_dcf_stop_queues(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int ret, i;

	/* Stop all queues */
	ret = ice_dcf_disable_queues(hw);
	if (ret)
		PMD_DRV_LOG(WARNING, "Fail to stop queues");

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (!txq)
			continue;
		txq->tx_rel_mbufs(txq);
		reset_tx_queue(txq);
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (!rxq)
			continue;
		rxq->rx_rel_mbufs(rxq);
		reset_rx_queue(rxq);
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
}

static int
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;

	if (ad->pf.adapter_stopped == 1) {
		PMD_DRV_LOG(DEBUG, "Port is already stopped");
		return 0;
	}

	ice_dcf_stop_queues(dev);

	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	ad->pf.adapter_stopped = 1;

	return 0;
}

static int
ice_dcf_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct ice_adapter *ad = &dcf_ad->parent;

	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	return 0;
}

static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->hash_key_size = hw->vf_res->rss_key_size;
	dev_info->reta_size = hw->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_RSS_HASH;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	return 0;
}

static int
ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}

static int
ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			enum rte_filter_op filter_op,
			void *arg)
{
	int ret = 0;

	if (!dev)
		return -EINVAL;

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &ice_flow_ops;
		break;

	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}

#define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
#define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6)
#define ICE_DCF_48_BIT_MASK  RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t)

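/*
 * Hardware exposes some counters as 48-bit values and others as 32-bit
 * values that wrap around. The helpers below return the delta between the
 * current reading and the recorded offset, adding one full wrap when the
 * counter has rolled over since the offset was taken.
 */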
static void
ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat)
{
	if (*stat >= *offset)
		*stat = *stat - *offset;
	else
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset);

	*stat &= ICE_DCF_48_BIT_MASK;
}

static void
ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat)
{
	if (*stat >= *offset)
		*stat = (uint64_t)(*stat - *offset);
	else
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset);
}

static void
ice_dcf_update_stats(struct virtchnl_eth_stats *oes,
		     struct virtchnl_eth_stats *nes)
{
	ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
	ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
	ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast);
	ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast);
	ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards);
	ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes);
	ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast);
	ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast);
	ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast);
	ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
	ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}

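/*
 * Report port statistics relative to the offsets snapshotted by
 * ice_dcf_stats_reset(), so the counters appear to start from zero at the
 * last reset.
 */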
static int
ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct virtchnl_eth_stats pstats;
	int ret;

	ret = ice_dcf_query_stats(hw, &pstats);
	if (ret == 0) {
		ice_dcf_update_stats(&hw->eth_stats_offset, &pstats);
		stats->ipackets = pstats.rx_unicast + pstats.rx_multicast +
				pstats.rx_broadcast - pstats.rx_discards;
		stats->opackets = pstats.tx_broadcast + pstats.tx_multicast +
				pstats.tx_unicast;
		stats->imissed = pstats.rx_discards;
		stats->oerrors = pstats.tx_errors + pstats.tx_discards;
		stats->ibytes = pstats.rx_bytes;
		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
		stats->obytes = pstats.tx_bytes;
	} else {
		PMD_DRV_LOG(ERR, "Get statistics failed");
	}
	return ret;
}

static int
ice_dcf_stats_reset(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *ad = dev->data->dev_private;
	struct ice_dcf_hw *hw = &ad->real_hw;
	struct virtchnl_eth_stats pstats;
	int ret;

	/* read stat values to clear hardware registers */
	ret = ice_dcf_query_stats(hw, &pstats);
	if (ret != 0)
		return ret;

	/* record the current values as the new stats offset */
	hw->eth_stats_offset = pstats;

	return 0;
}

static int
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ice_dcf_uninit_parent_adapter(dev);
	ice_dcf_uninit_hw(dev, &adapter->real_hw);

	return 0;
}

static int
ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	return 0;
}

static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
	.dev_start               = ice_dcf_dev_start,
	.dev_stop                = ice_dcf_dev_stop,
	.dev_close               = ice_dcf_dev_close,
	.dev_configure           = ice_dcf_dev_configure,
	.dev_infos_get           = ice_dcf_dev_info_get,
	.rx_queue_setup          = ice_rx_queue_setup,
	.tx_queue_setup          = ice_tx_queue_setup,
	.rx_queue_release        = ice_rx_queue_release,
	.tx_queue_release        = ice_tx_queue_release,
	.rx_queue_start          = ice_dcf_rx_queue_start,
	.tx_queue_start          = ice_dcf_tx_queue_start,
	.rx_queue_stop           = ice_dcf_rx_queue_stop,
	.tx_queue_stop           = ice_dcf_tx_queue_stop,
	.link_update             = ice_dcf_link_update,
	.stats_get               = ice_dcf_stats_get,
	.stats_reset             = ice_dcf_stats_reset,
	.promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
	.promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
	.allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
	.allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
	.filter_ctrl             = ice_dcf_dev_filter_ctrl,
};

static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;

	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
		return -1;
	}

	if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
		ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
		return -1;
	}

	return 0;
}

static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	ice_dcf_dev_close(eth_dev);

	return 0;
}

static int
ice_dcf_cap_check_handler(__rte_unused const char *key,
			  const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "dcf"))
		return -1;

	return 0;
}

static int
ice_dcf_cap_selected(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *key = "cap";
	int ret = 0;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, key))
		goto exit;

	/* dcf capability selected when there's a key-value pair: cap=dcf */
	if (rte_kvargs_process(kvlist, key,
			       ice_dcf_cap_check_handler, NULL) < 0)
		goto exit;

	ret = 1;

exit:
	rte_kvargs_free(kvlist);
	return ret;
}

static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
				 struct rte_pci_device *pci_dev)
{
	if (!ice_dcf_cap_selected(pci_dev->device.devargs))
		return 1;

	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_dcf_adapter),
					     ice_dcf_dev_init);
}

static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit);
}

static const struct rte_pci_id pci_id_ice_dcf_map[] = {
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_ice_dcf_pmd = {
	.id_table = pci_id_ice_dcf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_ice_dcf_pci_probe,
	.remove = eth_ice_dcf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");
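/*
 * Usage note: this PMD only claims a VF whose devargs carry the cap=dcf
 * key-value pair, for example (hypothetical PCI address):
 *   dpdk-testpmd -a 18:01.0,cap=dcf -- -i
 * Without it, eth_ice_dcf_pci_probe() returns 1 and the regular iavf PMD
 * can take the device instead.
 */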