xref: /f-stack/dpdk/drivers/net/ice/ice_rxtx.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <rte_ethdev_driver.h>
6 #include <rte_net.h>
7 #include <rte_vect.h>
8 
9 #include "rte_pmd_ice.h"
10 #include "ice_rxtx.h"
11 
12 #define ICE_TX_CKSUM_OFFLOAD_MASK (		 \
13 		PKT_TX_IP_CKSUM |		 \
14 		PKT_TX_L4_MASK |		 \
15 		PKT_TX_TCP_SEG |		 \
16 		PKT_TX_OUTER_IP_CKSUM)
17 
18 /* Offset of mbuf dynamic field for protocol extraction data */
19 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
20 
21 /* Mask of mbuf dynamic flags for protocol extraction type */
22 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
23 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
27 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
28 
29 static inline uint8_t
30 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
31 {
32 	static uint8_t rxdid_map[] = {
33 		[PROTO_XTR_NONE]      = ICE_RXDID_COMMS_OVS,
34 		[PROTO_XTR_VLAN]      = ICE_RXDID_COMMS_AUX_VLAN,
35 		[PROTO_XTR_IPV4]      = ICE_RXDID_COMMS_AUX_IPV4,
36 		[PROTO_XTR_IPV6]      = ICE_RXDID_COMMS_AUX_IPV6,
37 		[PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
38 		[PROTO_XTR_TCP]       = ICE_RXDID_COMMS_AUX_TCP,
39 		[PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
40 	};
41 
42 	return xtr_type < RTE_DIM(rxdid_map) ?
43 				rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
44 }
45 
46 static inline void
47 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
48 				       struct rte_mbuf *mb,
49 				       volatile union ice_rx_flex_desc *rxdp)
50 {
51 	volatile struct ice_32b_rx_flex_desc_comms *desc =
52 			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
53 	uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
54 
55 	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
56 		mb->ol_flags |= PKT_RX_RSS_HASH;
57 		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
58 	}
59 
60 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
61 	if (desc->flow_id != 0xFFFFFFFF) {
62 		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
63 		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
64 	}
65 #endif
66 }
67 
68 static inline void
69 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
70 				   struct rte_mbuf *mb,
71 				   volatile union ice_rx_flex_desc *rxdp)
72 {
73 	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
74 			(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
75 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
76 	uint16_t stat_err;
77 #endif
78 
79 	if (desc->flow_id != 0xFFFFFFFF) {
80 		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
81 		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
82 	}
83 
84 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
85 	stat_err = rte_le_to_cpu_16(desc->status_error0);
86 	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
87 		mb->ol_flags |= PKT_RX_RSS_HASH;
88 		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
89 	}
90 #endif
91 }
92 
93 static inline void
94 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
95 				      struct rte_mbuf *mb,
96 				      volatile union ice_rx_flex_desc *rxdp)
97 {
98 	volatile struct ice_32b_rx_flex_desc_comms *desc =
99 			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
100 	uint16_t stat_err;
101 
102 	stat_err = rte_le_to_cpu_16(desc->status_error0);
103 	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
104 		mb->ol_flags |= PKT_RX_RSS_HASH;
105 		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
106 	}
107 
108 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
109 	if (desc->flow_id != 0xFFFFFFFF) {
110 		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
111 		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
112 	}
113 
114 	if (rxq->xtr_ol_flag) {
115 		uint32_t metadata = 0;
116 
117 		stat_err = rte_le_to_cpu_16(desc->status_error1);
118 
119 		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
120 			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
121 
122 		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
123 			metadata |=
124 				rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
125 
126 		if (metadata) {
127 			mb->ol_flags |= rxq->xtr_ol_flag;
128 
129 			*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
130 		}
131 	}
132 #endif
133 }
134 
135 static inline void
136 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
137 				      struct rte_mbuf *mb,
138 				      volatile union ice_rx_flex_desc *rxdp)
139 {
140 	volatile struct ice_32b_rx_flex_desc_comms *desc =
141 			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
142 	uint16_t stat_err;
143 
144 	stat_err = rte_le_to_cpu_16(desc->status_error0);
145 	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
146 		mb->ol_flags |= PKT_RX_RSS_HASH;
147 		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
148 	}
149 
150 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
151 	if (desc->flow_id != 0xFFFFFFFF) {
152 		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
153 		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
154 	}
155 
156 	if (rxq->xtr_ol_flag) {
157 		uint32_t metadata = 0;
158 
159 		if (desc->flex_ts.flex.aux0 != 0xFFFF)
160 			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
161 		else if (desc->flex_ts.flex.aux1 != 0xFFFF)
162 			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
163 
164 		if (metadata) {
165 			mb->ol_flags |= rxq->xtr_ol_flag;
166 
167 			*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
168 		}
169 	}
170 #endif
171 }
172 
173 void
174 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
175 {
176 	switch (rxdid) {
177 	case ICE_RXDID_COMMS_AUX_VLAN:
178 		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
179 		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
180 		break;
181 
182 	case ICE_RXDID_COMMS_AUX_IPV4:
183 		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
184 		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
185 		break;
186 
187 	case ICE_RXDID_COMMS_AUX_IPV6:
188 		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
189 		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
190 		break;
191 
192 	case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
193 		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
194 		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
195 		break;
196 
197 	case ICE_RXDID_COMMS_AUX_TCP:
198 		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
199 		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
200 		break;
201 
202 	case ICE_RXDID_COMMS_AUX_IP_OFFSET:
203 		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
204 		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
205 		break;
206 
207 	case ICE_RXDID_COMMS_GENERIC:
208 		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
209 		break;
210 
211 	case ICE_RXDID_COMMS_OVS:
212 		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
213 		break;
214 
215 	default:
216 		/* update this according to the RXDID for PROTO_XTR_NONE */
217 		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
218 		break;
219 	}
220 
221 	if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
222 		rxq->xtr_ol_flag = 0;
223 }
224 
225 static enum ice_status
226 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
227 {
228 	struct ice_vsi *vsi = rxq->vsi;
229 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
230 	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
231 	struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
232 	struct ice_rlan_ctx rx_ctx;
233 	enum ice_status err;
234 	uint16_t buf_size, len;
235 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
236 	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
237 	uint32_t regval;
238 
239 	/* Set buffer size as the head split is disabled. */
240 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
241 			      RTE_PKTMBUF_HEADROOM);
242 	rxq->rx_hdr_len = 0;
243 	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
244 	len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
245 	rxq->max_pkt_len = RTE_MIN(len,
246 				   dev->data->dev_conf.rxmode.max_rx_pkt_len);
247 
248 	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
249 		if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
250 		    rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
251 			PMD_DRV_LOG(ERR, "maximum packet length must "
252 				    "be larger than %u and smaller than %u,"
253 				    "as jumbo frame is enabled",
254 				    (uint32_t)RTE_ETHER_MAX_LEN,
255 				    (uint32_t)ICE_FRAME_SIZE_MAX);
256 			return -EINVAL;
257 		}
258 	} else {
259 		if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
260 		    rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
261 			PMD_DRV_LOG(ERR, "maximum packet length must be "
262 				    "larger than %u and smaller than %u, "
263 				    "as jumbo frame is disabled",
264 				    (uint32_t)RTE_ETHER_MIN_LEN,
265 				    (uint32_t)RTE_ETHER_MAX_LEN);
266 			return -EINVAL;
267 		}
268 	}
269 
270 	memset(&rx_ctx, 0, sizeof(rx_ctx));
271 
272 	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
273 	rx_ctx.qlen = rxq->nb_rx_desc;
274 	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
275 	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
276 	rx_ctx.dtype = 0; /* No Header Split mode */
277 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
278 	rx_ctx.dsize = 1; /* 32B descriptors */
279 #endif
280 	rx_ctx.rxmax = rxq->max_pkt_len;
281 	/* TPH: Transaction Layer Packet (TLP) processing hints */
282 	rx_ctx.tphrdesc_ena = 1;
283 	rx_ctx.tphwdesc_ena = 1;
284 	rx_ctx.tphdata_ena = 1;
285 	rx_ctx.tphhead_ena = 1;
286 	/* Low Receive Queue Threshold, defined in units of 64 descriptors.
287 	 * When the number of free descriptors goes below the lrxqthresh,
288 	 * an immediate interrupt is triggered.
289 	 */
290 	rx_ctx.lrxqthresh = 2;
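	/* Illustrative reading of the threshold above, based only on the
	 * preceding comment (not on the hardware datasheet): with
	 * lrxqthresh = 2 and a granularity of 64 descriptors, the low-queue
	 * interrupt fires once fewer than 2 * 64 = 128 free descriptors
	 * remain in the ring.
	 */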
291 	/* default: use 32-byte descriptors, vlan tag extracted to L2TAG2 (1st) */
292 	rx_ctx.l2tsel = 1;
293 	rx_ctx.showiv = 0;
294 	rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
295 
296 	rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
297 
298 	PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
299 		    rxq->port_id, rxq->queue_id, rxdid);
300 
301 	if (!(pf->supported_rxdid & BIT(rxdid))) {
302 		PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
303 			    rxdid);
304 		return -EINVAL;
305 	}
306 
307 	ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
308 
309 	/* Enable Flexible Descriptors in the queue context which
310 	 * allows this driver to select a specific receive descriptor format
311 	 */
312 	regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
313 		QRXFLXP_CNTXT_RXDID_IDX_M;
314 
315 	/* increasing context priority to pick up profile ID;
316 	 * default is 0x01; setting to 0x03 to ensure profile
317 	 * is programmed if the previous context is of the same priority
318 	 */
319 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
320 		QRXFLXP_CNTXT_RXDID_PRIO_M;
321 
322 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
323 
324 	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
325 	if (err) {
326 		PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
327 			    rxq->queue_id);
328 		return -EINVAL;
329 	}
330 	err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
331 	if (err) {
332 		PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
333 			    rxq->queue_id);
334 		return -EINVAL;
335 	}
336 
337 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
338 			      RTE_PKTMBUF_HEADROOM);
339 
340 	/* Check if scattered RX needs to be used. */
341 	if (rxq->max_pkt_len > buf_size)
342 		dev->data->scattered_rx = 1;
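	/* Illustrative sizing example, assuming the common defaults of a
	 * 2176-byte mbuf data room and RTE_PKTMBUF_HEADROOM = 128 (neither is
	 * mandated by this driver): buf_size = 2176 - 128 = 2048, so any
	 * max_pkt_len above 2048 bytes (e.g. jumbo frames) enables scattered
	 * Rx and received packets are spread across chained mbufs.
	 */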
343 
344 	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
345 
346 	/* Init the Rx tail register */
347 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
348 
349 	return 0;
350 }
351 
352 /* Allocate mbufs for all descriptors in rx queue */
353 static int
354 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
355 {
356 	struct ice_rx_entry *rxe = rxq->sw_ring;
357 	uint64_t dma_addr;
358 	uint16_t i;
359 
360 	for (i = 0; i < rxq->nb_rx_desc; i++) {
361 		volatile union ice_rx_flex_desc *rxd;
362 		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
363 
364 		if (unlikely(!mbuf)) {
365 			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
366 			return -ENOMEM;
367 		}
368 
369 		rte_mbuf_refcnt_set(mbuf, 1);
370 		mbuf->next = NULL;
371 		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
372 		mbuf->nb_segs = 1;
373 		mbuf->port = rxq->port_id;
374 
375 		dma_addr =
376 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
377 
378 		rxd = &rxq->rx_ring[i];
379 		rxd->read.pkt_addr = dma_addr;
380 		rxd->read.hdr_addr = 0;
381 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
382 		rxd->read.rsvd1 = 0;
383 		rxd->read.rsvd2 = 0;
384 #endif
385 		rxe[i].mbuf = mbuf;
386 	}
387 
388 	return 0;
389 }
390 
391 /* Free all mbufs for descriptors in rx queue */
392 static void
393 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
394 {
395 	uint16_t i;
396 
397 	if (!rxq || !rxq->sw_ring) {
398 		PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
399 		return;
400 	}
401 
402 	for (i = 0; i < rxq->nb_rx_desc; i++) {
403 		if (rxq->sw_ring[i].mbuf) {
404 			rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
405 			rxq->sw_ring[i].mbuf = NULL;
406 		}
407 	}
408 	if (rxq->rx_nb_avail == 0)
409 		return;
410 	for (i = 0; i < rxq->rx_nb_avail; i++)
411 		rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
412 
413 	rxq->rx_nb_avail = 0;
414 }
415 
416 /* turn on or off rx queue
417  * @q_idx: queue index in pf scope
418  * @on: turn on or off the queue
419  */
420 static int
421 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
422 {
423 	uint32_t reg;
424 	uint16_t j;
425 
426 	/* QRX_CTRL = QRX_ENA */
427 	reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
428 
429 	if (on) {
430 		if (reg & QRX_CTRL_QENA_STAT_M)
431 			return 0; /* Already on, skip */
432 		reg |= QRX_CTRL_QENA_REQ_M;
433 	} else {
434 		if (!(reg & QRX_CTRL_QENA_STAT_M))
435 			return 0; /* Already off, skip */
436 		reg &= ~QRX_CTRL_QENA_REQ_M;
437 	}
438 
439 	/* Write the register */
440 	ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
441 	/* Check the result. It is said that QENA_STAT
442 	 * follows QENA_REQ by not more than 10 us.
443 	 * TODO: need to change the wait counter later
444 	 */
445 	for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
446 		rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
447 		reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
448 		if (on) {
449 			if ((reg & QRX_CTRL_QENA_REQ_M) &&
450 			    (reg & QRX_CTRL_QENA_STAT_M))
451 				break;
452 		} else {
453 			if (!(reg & QRX_CTRL_QENA_REQ_M) &&
454 			    !(reg & QRX_CTRL_QENA_STAT_M))
455 				break;
456 		}
457 	}
458 
459 	/* Check if it timed out */
460 	if (j >= ICE_CHK_Q_ENA_COUNT) {
461 		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
462 			    (on ? "enable" : "disable"), q_idx);
463 		return -ETIMEDOUT;
464 	}
465 
466 	return 0;
467 }
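/* The polling loop above bounds the wait for QENA_STAT to track QENA_REQ at
 * roughly ICE_CHK_Q_ENA_COUNT * ICE_CHK_Q_ENA_INTERVAL_US microseconds in
 * total (one rte_delay_us() per iteration); if that budget is exhausted the
 * caller gets -ETIMEDOUT.
 */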
468 
469 static inline int
470 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
471 {
472 	int ret = 0;
473 
474 	if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
475 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
476 			     "rxq->rx_free_thresh=%d, "
477 			     "ICE_RX_MAX_BURST=%d",
478 			     rxq->rx_free_thresh, ICE_RX_MAX_BURST);
479 		ret = -EINVAL;
480 	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
481 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
482 			     "rxq->rx_free_thresh=%d, "
483 			     "rxq->nb_rx_desc=%d",
484 			     rxq->rx_free_thresh, rxq->nb_rx_desc);
485 		ret = -EINVAL;
486 	} else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
487 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
488 			     "rxq->nb_rx_desc=%d, "
489 			     "rxq->rx_free_thresh=%d",
490 			     rxq->nb_rx_desc, rxq->rx_free_thresh);
491 		ret = -EINVAL;
492 	}
493 
494 	return ret;
495 }
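/* Illustrative application-side configuration that satisfies the three
 * preconditions above, assuming ICE_RX_MAX_BURST is 32 (value taken from a
 * typical ice_rxtx.h, not from this file):
 *
 *	struct rte_eth_rxconf rxconf = { .rx_free_thresh = 64 };
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *			       &rxconf, mbuf_pool);
 *
 * 64 >= 32, 64 < 1024 and 1024 % 64 == 0, so the bulk-allocation Rx path may
 * be selected for that queue.
 */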
496 
497 /* reset fields in ice_rx_queue back to default */
498 static void
499 ice_reset_rx_queue(struct ice_rx_queue *rxq)
500 {
501 	unsigned int i;
502 	uint16_t len;
503 
504 	if (!rxq) {
505 		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
506 		return;
507 	}
508 
509 	len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
510 
511 	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
512 		((volatile char *)rxq->rx_ring)[i] = 0;
513 
514 	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
515 	for (i = 0; i < ICE_RX_MAX_BURST; ++i)
516 		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
517 
518 	rxq->rx_nb_avail = 0;
519 	rxq->rx_next_avail = 0;
520 	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
521 
522 	rxq->rx_tail = 0;
523 	rxq->nb_rx_hold = 0;
524 	rxq->pkt_first_seg = NULL;
525 	rxq->pkt_last_seg = NULL;
526 
527 	rxq->rxrearm_start = 0;
528 	rxq->rxrearm_nb = 0;
529 }
530 
531 int
532 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
533 {
534 	struct ice_rx_queue *rxq;
535 	int err;
536 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
537 
538 	PMD_INIT_FUNC_TRACE();
539 
540 	if (rx_queue_id >= dev->data->nb_rx_queues) {
541 		PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
542 			    rx_queue_id, dev->data->nb_rx_queues);
543 		return -EINVAL;
544 	}
545 
546 	rxq = dev->data->rx_queues[rx_queue_id];
547 	if (!rxq || !rxq->q_set) {
548 		PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
549 			    rx_queue_id);
550 		return -EINVAL;
551 	}
552 
553 	err = ice_program_hw_rx_queue(rxq);
554 	if (err) {
555 		PMD_DRV_LOG(ERR, "fail to program RX queue %u",
556 			    rx_queue_id);
557 		return -EIO;
558 	}
559 
560 	err = ice_alloc_rx_queue_mbufs(rxq);
561 	if (err) {
562 		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
563 		return -ENOMEM;
564 	}
565 
566 	/* Init the RX tail register. */
567 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
568 
569 	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
570 	if (err) {
571 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
572 			    rx_queue_id);
573 
574 		rxq->rx_rel_mbufs(rxq);
575 		ice_reset_rx_queue(rxq);
576 		return -EINVAL;
577 	}
578 
579 	dev->data->rx_queue_state[rx_queue_id] =
580 		RTE_ETH_QUEUE_STATE_STARTED;
581 
582 	return 0;
583 }
584 
585 int
586 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
587 {
588 	struct ice_rx_queue *rxq;
589 	int err;
590 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
591 
592 	if (rx_queue_id < dev->data->nb_rx_queues) {
593 		rxq = dev->data->rx_queues[rx_queue_id];
594 
595 		err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
596 		if (err) {
597 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
598 				    rx_queue_id);
599 			return -EINVAL;
600 		}
601 		rxq->rx_rel_mbufs(rxq);
602 		ice_reset_rx_queue(rxq);
603 		dev->data->rx_queue_state[rx_queue_id] =
604 			RTE_ETH_QUEUE_STATE_STOPPED;
605 	}
606 
607 	return 0;
608 }
609 
610 int
611 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
612 {
613 	struct ice_tx_queue *txq;
614 	int err;
615 	struct ice_vsi *vsi;
616 	struct ice_hw *hw;
617 	struct ice_aqc_add_tx_qgrp *txq_elem;
618 	struct ice_tlan_ctx tx_ctx;
619 	int buf_len;
620 
621 	PMD_INIT_FUNC_TRACE();
622 
623 	if (tx_queue_id >= dev->data->nb_tx_queues) {
624 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
625 			    tx_queue_id, dev->data->nb_tx_queues);
626 		return -EINVAL;
627 	}
628 
629 	txq = dev->data->tx_queues[tx_queue_id];
630 	if (!txq || !txq->q_set) {
631 		PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
632 			    tx_queue_id);
633 		return -EINVAL;
634 	}
635 
636 	buf_len = ice_struct_size(txq_elem, txqs, 1);
637 	txq_elem = ice_malloc(hw, buf_len);
638 	if (!txq_elem)
639 		return -ENOMEM;
640 
641 	vsi = txq->vsi;
642 	hw = ICE_VSI_TO_HW(vsi);
643 
644 	memset(&tx_ctx, 0, sizeof(tx_ctx));
645 	txq_elem->num_txqs = 1;
646 	txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
647 
648 	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
649 	tx_ctx.qlen = txq->nb_tx_desc;
650 	tx_ctx.pf_num = hw->pf_id;
651 	tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
652 	tx_ctx.src_vsi = vsi->vsi_id;
653 	tx_ctx.port_num = hw->port_info->lport;
654 	tx_ctx.tso_ena = 1; /* tso enable */
655 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
656 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
657 
658 	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
659 		    ice_tlan_ctx_info);
660 
661 	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
662 
663 	/* Init the Tx tail register */
664 	ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
665 
666 	/* Fix me, we assume TC always 0 here */
667 	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
668 			txq_elem, buf_len, NULL);
669 	if (err) {
670 		PMD_DRV_LOG(ERR, "Failed to add lan txq");
671 		rte_free(txq_elem);
672 		return -EIO;
673 	}
674 	/* store the schedule node id */
675 	txq->q_teid = txq_elem->txqs[0].q_teid;
676 
677 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
678 
679 	rte_free(txq_elem);
680 	return 0;
681 }
682 
683 static enum ice_status
684 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
685 {
686 	struct ice_vsi *vsi = rxq->vsi;
687 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
688 	uint32_t rxdid = ICE_RXDID_LEGACY_1;
689 	struct ice_rlan_ctx rx_ctx;
690 	enum ice_status err;
691 	uint32_t regval;
692 
693 	rxq->rx_hdr_len = 0;
694 	rxq->rx_buf_len = 1024;
695 
696 	memset(&rx_ctx, 0, sizeof(rx_ctx));
697 
698 	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
699 	rx_ctx.qlen = rxq->nb_rx_desc;
700 	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
701 	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
702 	rx_ctx.dtype = 0; /* No Header Split mode */
703 	rx_ctx.dsize = 1; /* 32B descriptors */
704 	rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
705 	/* TPH: Transaction Layer Packet (TLP) processing hints */
706 	rx_ctx.tphrdesc_ena = 1;
707 	rx_ctx.tphwdesc_ena = 1;
708 	rx_ctx.tphdata_ena = 1;
709 	rx_ctx.tphhead_ena = 1;
710 	/* Low Receive Queue Threshold, defined in units of 64 descriptors.
711 	 * When the number of free descriptors goes below the lrxqthresh,
712 	 * an immediate interrupt is triggered.
713 	 */
714 	rx_ctx.lrxqthresh = 2;
715 	/* default: use 32-byte descriptors, vlan tag extracted to L2TAG2 (1st) */
716 	rx_ctx.l2tsel = 1;
717 	rx_ctx.showiv = 0;
718 	rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
719 
720 	/* Enable Flexible Descriptors in the queue context which
721 	 * allows this driver to select a specific receive descriptor format
722 	 */
723 	regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
724 		QRXFLXP_CNTXT_RXDID_IDX_M;
725 
726 	/* increasing context priority to pick up profile ID;
727 	 * default is 0x01; setting to 0x03 to ensure profile
728 	 * is programmed if the previous context is of the same priority
729 	 */
730 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
731 		QRXFLXP_CNTXT_RXDID_PRIO_M;
732 
733 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
734 
735 	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
736 	if (err) {
737 		PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
738 			    rxq->queue_id);
739 		return -EINVAL;
740 	}
741 	err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
742 	if (err) {
743 		PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
744 			    rxq->queue_id);
745 		return -EINVAL;
746 	}
747 
748 	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
749 
750 	/* Init the Rx tail register */
751 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
752 
753 	return 0;
754 }
755 
756 int
757 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
758 {
759 	struct ice_rx_queue *rxq;
760 	int err;
761 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
762 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
763 
764 	PMD_INIT_FUNC_TRACE();
765 
766 	rxq = pf->fdir.rxq;
767 	if (!rxq || !rxq->q_set) {
768 		PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
769 			    rx_queue_id);
770 		return -EINVAL;
771 	}
772 
773 	err = ice_fdir_program_hw_rx_queue(rxq);
774 	if (err) {
775 		PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
776 			    rx_queue_id);
777 		return -EIO;
778 	}
779 
780 	/* Init the RX tail register. */
781 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
782 
783 	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
784 	if (err) {
785 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
786 			    rx_queue_id);
787 
788 		ice_reset_rx_queue(rxq);
789 		return -EINVAL;
790 	}
791 
792 	return 0;
793 }
794 
795 int
796 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
797 {
798 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
799 	struct ice_tx_queue *txq;
800 	int err;
801 	struct ice_vsi *vsi;
802 	struct ice_hw *hw;
803 	struct ice_aqc_add_tx_qgrp *txq_elem;
804 	struct ice_tlan_ctx tx_ctx;
805 	int buf_len;
806 
807 	PMD_INIT_FUNC_TRACE();
808 
809 	txq = pf->fdir.txq;
810 	if (!txq || !txq->q_set) {
811 		PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
812 			    tx_queue_id);
813 		return -EINVAL;
814 	}
815 
816 	buf_len = ice_struct_size(txq_elem, txqs, 1);
817 	txq_elem = ice_malloc(hw, buf_len);
818 	if (!txq_elem)
819 		return -ENOMEM;
820 
821 	vsi = txq->vsi;
822 	hw = ICE_VSI_TO_HW(vsi);
823 
824 	memset(&tx_ctx, 0, sizeof(tx_ctx));
825 	txq_elem->num_txqs = 1;
826 	txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
827 
828 	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
829 	tx_ctx.qlen = txq->nb_tx_desc;
830 	tx_ctx.pf_num = hw->pf_id;
831 	tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
832 	tx_ctx.src_vsi = vsi->vsi_id;
833 	tx_ctx.port_num = hw->port_info->lport;
834 	tx_ctx.tso_ena = 1; /* tso enable */
835 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
836 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
837 
838 	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
839 		    ice_tlan_ctx_info);
840 
841 	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
842 
843 	/* Init the Tx tail register */
844 	ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
845 
846 	/* Fix me, we assume TC always 0 here */
847 	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
848 			      txq_elem, buf_len, NULL);
849 	if (err) {
850 		PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
851 		rte_free(txq_elem);
852 		return -EIO;
853 	}
854 	/* store the schedule node id */
855 	txq->q_teid = txq_elem->txqs[0].q_teid;
856 
857 	rte_free(txq_elem);
858 	return 0;
859 }
860 
861 /* Free all mbufs for descriptors in tx queue */
862 static void
863 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
864 {
865 	uint16_t i;
866 
867 	if (!txq || !txq->sw_ring) {
868 		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
869 		return;
870 	}
871 
872 	for (i = 0; i < txq->nb_tx_desc; i++) {
873 		if (txq->sw_ring[i].mbuf) {
874 			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
875 			txq->sw_ring[i].mbuf = NULL;
876 		}
877 	}
878 }
879 
880 static void
881 ice_reset_tx_queue(struct ice_tx_queue *txq)
882 {
883 	struct ice_tx_entry *txe;
884 	uint16_t i, prev, size;
885 
886 	if (!txq) {
887 		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
888 		return;
889 	}
890 
891 	txe = txq->sw_ring;
892 	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
893 	for (i = 0; i < size; i++)
894 		((volatile char *)txq->tx_ring)[i] = 0;
895 
896 	prev = (uint16_t)(txq->nb_tx_desc - 1);
897 	for (i = 0; i < txq->nb_tx_desc; i++) {
898 		volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
899 
900 		txd->cmd_type_offset_bsz =
901 			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
902 		txe[i].mbuf =  NULL;
903 		txe[i].last_id = i;
904 		txe[prev].next_id = i;
905 		prev = i;
906 	}
907 
908 	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
909 	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
910 
911 	txq->tx_tail = 0;
912 	txq->nb_tx_used = 0;
913 
914 	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
915 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
916 }
917 
918 int
919 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
920 {
921 	struct ice_tx_queue *txq;
922 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
923 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
924 	struct ice_vsi *vsi = pf->main_vsi;
925 	enum ice_status status;
926 	uint16_t q_ids[1];
927 	uint32_t q_teids[1];
928 	uint16_t q_handle = tx_queue_id;
929 
930 	if (tx_queue_id >= dev->data->nb_tx_queues) {
931 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
932 			    tx_queue_id, dev->data->nb_tx_queues);
933 		return -EINVAL;
934 	}
935 
936 	txq = dev->data->tx_queues[tx_queue_id];
937 	if (!txq) {
938 		PMD_DRV_LOG(ERR, "TX queue %u is not available",
939 			    tx_queue_id);
940 		return -EINVAL;
941 	}
942 
943 	q_ids[0] = txq->reg_idx;
944 	q_teids[0] = txq->q_teid;
945 
946 	/* Fix me, we assume TC always 0 here */
947 	status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
948 				q_ids, q_teids, ICE_NO_RESET, 0, NULL);
949 	if (status != ICE_SUCCESS) {
950 		PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
951 		return -EINVAL;
952 	}
953 
954 	txq->tx_rel_mbufs(txq);
955 	ice_reset_tx_queue(txq);
956 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
957 
958 	return 0;
959 }
960 
961 int
962 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
963 {
964 	struct ice_rx_queue *rxq;
965 	int err;
966 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
967 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
968 
969 	rxq = pf->fdir.rxq;
970 
971 	err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
972 	if (err) {
973 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
974 			    rx_queue_id);
975 		return -EINVAL;
976 	}
977 	rxq->rx_rel_mbufs(rxq);
978 
979 	return 0;
980 }
981 
982 int
983 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
984 {
985 	struct ice_tx_queue *txq;
986 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
987 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
988 	struct ice_vsi *vsi = pf->main_vsi;
989 	enum ice_status status;
990 	uint16_t q_ids[1];
991 	uint32_t q_teids[1];
992 	uint16_t q_handle = tx_queue_id;
993 
994 	txq = pf->fdir.txq;
995 	if (!txq) {
996 		PMD_DRV_LOG(ERR, "TX queue %u is not available",
997 			    tx_queue_id);
998 		return -EINVAL;
999 	}
1000 	vsi = txq->vsi;
1001 
1002 	q_ids[0] = txq->reg_idx;
1003 	q_teids[0] = txq->q_teid;
1004 
1005 	/* Fix me, we assume TC always 0 here */
1006 	status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1007 				 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1008 	if (status != ICE_SUCCESS) {
1009 		PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1010 		return -EINVAL;
1011 	}
1012 
1013 	txq->tx_rel_mbufs(txq);
1014 
1015 	return 0;
1016 }
1017 
1018 int
1019 ice_rx_queue_setup(struct rte_eth_dev *dev,
1020 		   uint16_t queue_idx,
1021 		   uint16_t nb_desc,
1022 		   unsigned int socket_id,
1023 		   const struct rte_eth_rxconf *rx_conf,
1024 		   struct rte_mempool *mp)
1025 {
1026 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1027 	struct ice_adapter *ad =
1028 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1029 	struct ice_vsi *vsi = pf->main_vsi;
1030 	struct ice_rx_queue *rxq;
1031 	const struct rte_memzone *rz;
1032 	uint32_t ring_size;
1033 	uint16_t len;
1034 	int use_def_burst_func = 1;
1035 
1036 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1037 	    nb_desc > ICE_MAX_RING_DESC ||
1038 	    nb_desc < ICE_MIN_RING_DESC) {
1039 		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1040 			     "invalid", nb_desc);
1041 		return -EINVAL;
1042 	}
1043 
1044 	/* Free memory if needed */
1045 	if (dev->data->rx_queues[queue_idx]) {
1046 		ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1047 		dev->data->rx_queues[queue_idx] = NULL;
1048 	}
1049 
1050 	/* Allocate the rx queue data structure */
1051 	rxq = rte_zmalloc_socket(NULL,
1052 				 sizeof(struct ice_rx_queue),
1053 				 RTE_CACHE_LINE_SIZE,
1054 				 socket_id);
1055 	if (!rxq) {
1056 		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1057 			     "rx queue data structure");
1058 		return -ENOMEM;
1059 	}
1060 	rxq->mp = mp;
1061 	rxq->nb_rx_desc = nb_desc;
1062 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1063 	rxq->queue_id = queue_idx;
1064 
1065 	rxq->reg_idx = vsi->base_queue + queue_idx;
1066 	rxq->port_id = dev->data->port_id;
1067 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1068 		rxq->crc_len = RTE_ETHER_CRC_LEN;
1069 	else
1070 		rxq->crc_len = 0;
1071 
1072 	rxq->drop_en = rx_conf->rx_drop_en;
1073 	rxq->vsi = vsi;
1074 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1075 	rxq->proto_xtr = pf->proto_xtr != NULL ?
1076 			 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1077 
1078 	/* Allocate the maximum number of RX ring hardware descriptors. */
1079 	len = ICE_MAX_RING_DESC;
1080 
1081 	/**
1082 	 * Allocating a little more memory because vectorized/bulk_alloc Rx
1083 	 * functions doesn't check boundaries each time.
1084 	 */
1085 	len += ICE_RX_MAX_BURST;
1086 
1087 	/* Allocate the maximum number of RX ring hardware descriptors. */
1088 	ring_size = sizeof(union ice_rx_flex_desc) * len;
1089 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1090 	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1091 				      ring_size, ICE_RING_BASE_ALIGN,
1092 				      socket_id);
1093 	if (!rz) {
1094 		ice_rx_queue_release(rxq);
1095 		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1096 		return -ENOMEM;
1097 	}
1098 
1099 	/* Zero all the descriptors in the ring. */
1100 	memset(rz->addr, 0, ring_size);
1101 
1102 	rxq->rx_ring_dma = rz->iova;
1103 	rxq->rx_ring = rz->addr;
1104 
1105 	/* always reserve more for bulk alloc */
1106 	len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1107 
1108 	/* Allocate the software ring. */
1109 	rxq->sw_ring = rte_zmalloc_socket(NULL,
1110 					  sizeof(struct ice_rx_entry) * len,
1111 					  RTE_CACHE_LINE_SIZE,
1112 					  socket_id);
1113 	if (!rxq->sw_ring) {
1114 		ice_rx_queue_release(rxq);
1115 		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1116 		return -ENOMEM;
1117 	}
1118 
1119 	ice_reset_rx_queue(rxq);
1120 	rxq->q_set = true;
1121 	dev->data->rx_queues[queue_idx] = rxq;
1122 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1123 
1124 	use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
1125 
1126 	if (!use_def_burst_func) {
1127 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1128 			     "satisfied. Rx Burst Bulk Alloc function will be "
1129 			     "used on port=%d, queue=%d.",
1130 			     rxq->port_id, rxq->queue_id);
1131 	} else {
1132 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1133 			     "not satisfied, Scattered Rx is requested. "
1134 			     "on port=%d, queue=%d.",
1135 			     rxq->port_id, rxq->queue_id);
1136 		ad->rx_bulk_alloc_allowed = false;
1137 	}
1138 
1139 	return 0;
1140 }
1141 
1142 void
1143 ice_rx_queue_release(void *rxq)
1144 {
1145 	struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1146 
1147 	if (!q) {
1148 		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1149 		return;
1150 	}
1151 
1152 	q->rx_rel_mbufs(q);
1153 	rte_free(q->sw_ring);
1154 	rte_free(q);
1155 }
1156 
1157 int
1158 ice_tx_queue_setup(struct rte_eth_dev *dev,
1159 		   uint16_t queue_idx,
1160 		   uint16_t nb_desc,
1161 		   unsigned int socket_id,
1162 		   const struct rte_eth_txconf *tx_conf)
1163 {
1164 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1165 	struct ice_vsi *vsi = pf->main_vsi;
1166 	struct ice_tx_queue *txq;
1167 	const struct rte_memzone *tz;
1168 	uint32_t ring_size;
1169 	uint16_t tx_rs_thresh, tx_free_thresh;
1170 	uint64_t offloads;
1171 
1172 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1173 
1174 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1175 	    nb_desc > ICE_MAX_RING_DESC ||
1176 	    nb_desc < ICE_MIN_RING_DESC) {
1177 		PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1178 			     "invalid", nb_desc);
1179 		return -EINVAL;
1180 	}
1181 
1182 	/**
1183 	 * The following two parameters control the setting of the RS bit on
1184 	 * transmit descriptors. TX descriptors will have their RS bit set
1185 	 * after txq->tx_rs_thresh descriptors have been used. The TX
1186 	 * descriptor ring will be cleaned after txq->tx_free_thresh
1187 	 * descriptors are used or if the number of descriptors required to
1188 	 * transmit a packet is greater than the number of free TX descriptors.
1189 	 *
1190 	 * The following constraints must be satisfied:
1191 	 *  - tx_rs_thresh must be greater than 0.
1192 	 *  - tx_rs_thresh must be less than the size of the ring minus 2.
1193 	 *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
1194 	 *  - tx_rs_thresh must be a divisor of the ring size.
1195 	 *  - tx_free_thresh must be greater than 0.
1196 	 *  - tx_free_thresh must be less than the size of the ring minus 3.
1197 	 *  - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1198 	 *
1199 	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1200 	 * race condition, hence the maximum threshold constraints. When set
1201 	 * to zero use default values.
1202 	 */
1203 	tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1204 				    tx_conf->tx_free_thresh :
1205 				    ICE_DEFAULT_TX_FREE_THRESH);
1206 	/* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1207 	tx_rs_thresh =
1208 		(ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1209 			nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1210 	if (tx_conf->tx_rs_thresh)
1211 		tx_rs_thresh = tx_conf->tx_rs_thresh;
1212 	if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1213 		PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1214 				"exceed nb_desc. (tx_rs_thresh=%u "
1215 				"tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1216 				(unsigned int)tx_rs_thresh,
1217 				(unsigned int)tx_free_thresh,
1218 				(unsigned int)nb_desc,
1219 				(int)dev->data->port_id,
1220 				(int)queue_idx);
1221 		return -EINVAL;
1222 	}
1223 	if (tx_rs_thresh >= (nb_desc - 2)) {
1224 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1225 			     "number of TX descriptors minus 2. "
1226 			     "(tx_rs_thresh=%u port=%d queue=%d)",
1227 			     (unsigned int)tx_rs_thresh,
1228 			     (int)dev->data->port_id,
1229 			     (int)queue_idx);
1230 		return -EINVAL;
1231 	}
1232 	if (tx_free_thresh >= (nb_desc - 3)) {
1233 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1234 			     "tx_free_thresh must be less than the "
1235 			     "number of TX descriptors minus 3. "
1236 			     "(tx_free_thresh=%u port=%d queue=%d)",
1237 			     (unsigned int)tx_free_thresh,
1238 			     (int)dev->data->port_id,
1239 			     (int)queue_idx);
1240 		return -EINVAL;
1241 	}
1242 	if (tx_rs_thresh > tx_free_thresh) {
1243 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1244 			     "equal to tx_free_thresh. (tx_free_thresh=%u"
1245 			     " tx_rs_thresh=%u port=%d queue=%d)",
1246 			     (unsigned int)tx_free_thresh,
1247 			     (unsigned int)tx_rs_thresh,
1248 			     (int)dev->data->port_id,
1249 			     (int)queue_idx);
1250 		return -EINVAL;
1251 	}
1252 	if ((nb_desc % tx_rs_thresh) != 0) {
1253 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1254 			     "number of TX descriptors. (tx_rs_thresh=%u"
1255 			     " port=%d queue=%d)",
1256 			     (unsigned int)tx_rs_thresh,
1257 			     (int)dev->data->port_id,
1258 			     (int)queue_idx);
1259 		return -EINVAL;
1260 	}
1261 	if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1262 		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1263 			     "tx_rs_thresh is greater than 1. "
1264 			     "(tx_rs_thresh=%u port=%d queue=%d)",
1265 			     (unsigned int)tx_rs_thresh,
1266 			     (int)dev->data->port_id,
1267 			     (int)queue_idx);
1268 		return -EINVAL;
1269 	}
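	/* Worked example of the threshold derivation above, assuming the
	 * typical defaults ICE_DEFAULT_TX_FREE_THRESH = 32 and
	 * ICE_DEFAULT_TX_RSBIT_THRESH = 32 (both defined outside this file):
	 * with nb_desc = 1024 and both tx_conf thresholds left at zero,
	 * tx_free_thresh = 32 and tx_rs_thresh = 32, which passes every check:
	 * 32 + 32 <= 1024, 32 < 1022, 32 < 1021, 32 <= 32 and 1024 % 32 == 0.
	 */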
1270 
1271 	/* Free memory if needed. */
1272 	if (dev->data->tx_queues[queue_idx]) {
1273 		ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1274 		dev->data->tx_queues[queue_idx] = NULL;
1275 	}
1276 
1277 	/* Allocate the TX queue data structure. */
1278 	txq = rte_zmalloc_socket(NULL,
1279 				 sizeof(struct ice_tx_queue),
1280 				 RTE_CACHE_LINE_SIZE,
1281 				 socket_id);
1282 	if (!txq) {
1283 		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1284 			     "tx queue structure");
1285 		return -ENOMEM;
1286 	}
1287 
1288 	/* Allocate TX hardware ring descriptors. */
1289 	ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1290 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1291 	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1292 				      ring_size, ICE_RING_BASE_ALIGN,
1293 				      socket_id);
1294 	if (!tz) {
1295 		ice_tx_queue_release(txq);
1296 		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1297 		return -ENOMEM;
1298 	}
1299 
1300 	txq->nb_tx_desc = nb_desc;
1301 	txq->tx_rs_thresh = tx_rs_thresh;
1302 	txq->tx_free_thresh = tx_free_thresh;
1303 	txq->pthresh = tx_conf->tx_thresh.pthresh;
1304 	txq->hthresh = tx_conf->tx_thresh.hthresh;
1305 	txq->wthresh = tx_conf->tx_thresh.wthresh;
1306 	txq->queue_id = queue_idx;
1307 
1308 	txq->reg_idx = vsi->base_queue + queue_idx;
1309 	txq->port_id = dev->data->port_id;
1310 	txq->offloads = offloads;
1311 	txq->vsi = vsi;
1312 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
1313 
1314 	txq->tx_ring_dma = tz->iova;
1315 	txq->tx_ring = tz->addr;
1316 
1317 	/* Allocate software ring */
1318 	txq->sw_ring =
1319 		rte_zmalloc_socket(NULL,
1320 				   sizeof(struct ice_tx_entry) * nb_desc,
1321 				   RTE_CACHE_LINE_SIZE,
1322 				   socket_id);
1323 	if (!txq->sw_ring) {
1324 		ice_tx_queue_release(txq);
1325 		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1326 		return -ENOMEM;
1327 	}
1328 
1329 	ice_reset_tx_queue(txq);
1330 	txq->q_set = true;
1331 	dev->data->tx_queues[queue_idx] = txq;
1332 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1333 	ice_set_tx_function_flag(dev, txq);
1334 
1335 	return 0;
1336 }
1337 
1338 void
1339 ice_tx_queue_release(void *txq)
1340 {
1341 	struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1342 
1343 	if (!q) {
1344 		PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1345 		return;
1346 	}
1347 
1348 	q->tx_rel_mbufs(q);
1349 	rte_free(q->sw_ring);
1350 	rte_free(q);
1351 }
1352 
1353 void
1354 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1355 		 struct rte_eth_rxq_info *qinfo)
1356 {
1357 	struct ice_rx_queue *rxq;
1358 
1359 	rxq = dev->data->rx_queues[queue_id];
1360 
1361 	qinfo->mp = rxq->mp;
1362 	qinfo->scattered_rx = dev->data->scattered_rx;
1363 	qinfo->nb_desc = rxq->nb_rx_desc;
1364 
1365 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1366 	qinfo->conf.rx_drop_en = rxq->drop_en;
1367 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1368 }
1369 
1370 void
1371 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1372 		 struct rte_eth_txq_info *qinfo)
1373 {
1374 	struct ice_tx_queue *txq;
1375 
1376 	txq = dev->data->tx_queues[queue_id];
1377 
1378 	qinfo->nb_desc = txq->nb_tx_desc;
1379 
1380 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1381 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1382 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1383 
1384 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1385 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1386 	qinfo->conf.offloads = txq->offloads;
1387 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1388 }
1389 
1390 uint32_t
1391 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1392 {
1393 #define ICE_RXQ_SCAN_INTERVAL 4
1394 	volatile union ice_rx_flex_desc *rxdp;
1395 	struct ice_rx_queue *rxq;
1396 	uint16_t desc = 0;
1397 
1398 	rxq = dev->data->rx_queues[rx_queue_id];
1399 	rxdp = &rxq->rx_ring[rxq->rx_tail];
1400 	while ((desc < rxq->nb_rx_desc) &&
1401 	       rte_le_to_cpu_16(rxdp->wb.status_error0) &
1402 	       (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1403 		/**
1404 		 * Check the DD bit of one rx descriptor in each group of 4,
1405 		 * to avoid checking too frequently and downgrading performance
1406 		 * too much.
1407 		 */
1408 		desc += ICE_RXQ_SCAN_INTERVAL;
1409 		rxdp += ICE_RXQ_SCAN_INTERVAL;
1410 		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1411 			rxdp = &(rxq->rx_ring[rxq->rx_tail +
1412 				 desc - rxq->nb_rx_desc]);
1413 	}
1414 
1415 	return desc;
1416 }
1417 
1418 #define ICE_RX_FLEX_ERR0_BITS	\
1419 	((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) |	\
1420 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |	\
1421 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |	\
1422 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |	\
1423 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) |	\
1424 	 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1425 
1426 /* Rx L3/L4 checksum */
1427 static inline uint64_t
1428 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1429 {
1430 	uint64_t flags = 0;
1431 
1432 	/* check if HW has decoded the packet and checksum */
1433 	if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1434 		return 0;
1435 
1436 	if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1437 		flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1438 		return flags;
1439 	}
1440 
1441 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1442 		flags |= PKT_RX_IP_CKSUM_BAD;
1443 	else
1444 		flags |= PKT_RX_IP_CKSUM_GOOD;
1445 
1446 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1447 		flags |= PKT_RX_L4_CKSUM_BAD;
1448 	else
1449 		flags |= PKT_RX_L4_CKSUM_GOOD;
1450 
1451 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1452 		flags |= PKT_RX_EIP_CKSUM_BAD;
1453 
1454 	return flags;
1455 }
1456 
1457 static inline void
1458 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1459 {
1460 	if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1461 	    (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1462 		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1463 		mb->vlan_tci =
1464 			rte_le_to_cpu_16(rxdp->wb.l2tag1);
1465 		PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1466 			   rte_le_to_cpu_16(rxdp->wb.l2tag1));
1467 	} else {
1468 		mb->vlan_tci = 0;
1469 	}
1470 
1471 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1472 	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1473 	    (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1474 		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1475 				PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1476 		mb->vlan_tci_outer = mb->vlan_tci;
1477 		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1478 		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1479 			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1480 			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1481 	} else {
1482 		mb->vlan_tci_outer = 0;
1483 	}
1484 #endif
1485 	PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1486 		   mb->vlan_tci, mb->vlan_tci_outer);
1487 }
1488 
1489 #define ICE_LOOK_AHEAD 8
1490 #if (ICE_LOOK_AHEAD != 8)
1491 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1492 #endif
1493 static inline int
1494 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1495 {
1496 	volatile union ice_rx_flex_desc *rxdp;
1497 	struct ice_rx_entry *rxep;
1498 	struct rte_mbuf *mb;
1499 	uint16_t stat_err0;
1500 	uint16_t pkt_len;
1501 	int32_t s[ICE_LOOK_AHEAD], nb_dd;
1502 	int32_t i, j, nb_rx = 0;
1503 	uint64_t pkt_flags = 0;
1504 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1505 
1506 	rxdp = &rxq->rx_ring[rxq->rx_tail];
1507 	rxep = &rxq->sw_ring[rxq->rx_tail];
1508 
1509 	stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1510 
1511 	/* Make sure there is at least 1 packet to receive */
1512 	if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1513 		return 0;
1514 
1515 	/**
1516 	 * Scan LOOK_AHEAD descriptors at a time to determine which
1517 	 * descriptors reference packets that are ready to be received.
1518 	 */
1519 	for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1520 	     rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1521 		/* Read desc statuses backwards to avoid race condition */
1522 		for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1523 			s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1524 
1525 		rte_smp_rmb();
1526 
1527 		/* Compute how many status bits were set */
1528 		for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1529 			nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1530 
1531 		nb_rx += nb_dd;
1532 
1533 		/* Translate descriptor info to mbuf parameters */
1534 		for (j = 0; j < nb_dd; j++) {
1535 			mb = rxep[j].mbuf;
1536 			pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1537 				   ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1538 			mb->data_len = pkt_len;
1539 			mb->pkt_len = pkt_len;
1540 			mb->ol_flags = 0;
1541 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1542 			pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1543 			mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1544 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1545 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1546 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1547 
1548 			mb->ol_flags |= pkt_flags;
1549 		}
1550 
1551 		for (j = 0; j < ICE_LOOK_AHEAD; j++)
1552 			rxq->rx_stage[i + j] = rxep[j].mbuf;
1553 
1554 		if (nb_dd != ICE_LOOK_AHEAD)
1555 			break;
1556 	}
1557 
1558 	/* Clear software ring entries */
1559 	for (i = 0; i < nb_rx; i++)
1560 		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1561 
1562 	PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1563 		   "port_id=%u, queue_id=%u, nb_rx=%d",
1564 		   rxq->port_id, rxq->queue_id, nb_rx);
1565 
1566 	return nb_rx;
1567 }
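/* A note on the scan loop above: with ICE_LOOK_AHEAD fixed at 8 and assuming
 * ICE_RX_MAX_BURST is 32 (defined outside this file), at most 32 descriptors
 * are examined per call, in 4 groups of 8, and the loop stops at the first
 * group whose 8 descriptors are not all marked done (DD bit clear).
 */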
1568 
1569 static inline uint16_t
1570 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1571 		       struct rte_mbuf **rx_pkts,
1572 		       uint16_t nb_pkts)
1573 {
1574 	uint16_t i;
1575 	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1576 
1577 	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1578 
1579 	for (i = 0; i < nb_pkts; i++)
1580 		rx_pkts[i] = stage[i];
1581 
1582 	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1583 	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1584 
1585 	return nb_pkts;
1586 }
1587 
1588 static inline int
1589 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1590 {
1591 	volatile union ice_rx_flex_desc *rxdp;
1592 	struct ice_rx_entry *rxep;
1593 	struct rte_mbuf *mb;
1594 	uint16_t alloc_idx, i;
1595 	uint64_t dma_addr;
1596 	int diag;
1597 
1598 	/* Allocate buffers in bulk */
1599 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1600 			       (rxq->rx_free_thresh - 1));
1601 	rxep = &rxq->sw_ring[alloc_idx];
1602 	diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1603 				    rxq->rx_free_thresh);
1604 	if (unlikely(diag != 0)) {
1605 		PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1606 		return -ENOMEM;
1607 	}
1608 
1609 	rxdp = &rxq->rx_ring[alloc_idx];
1610 	for (i = 0; i < rxq->rx_free_thresh; i++) {
1611 		if (likely(i < (rxq->rx_free_thresh - 1)))
1612 			/* Prefetch next mbuf */
1613 			rte_prefetch0(rxep[i + 1].mbuf);
1614 
1615 		mb = rxep[i].mbuf;
1616 		rte_mbuf_refcnt_set(mb, 1);
1617 		mb->next = NULL;
1618 		mb->data_off = RTE_PKTMBUF_HEADROOM;
1619 		mb->nb_segs = 1;
1620 		mb->port = rxq->port_id;
1621 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1622 		rxdp[i].read.hdr_addr = 0;
1623 		rxdp[i].read.pkt_addr = dma_addr;
1624 	}
1625 
1626 	/* Update the rx tail register */
1627 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1628 
1629 	rxq->rx_free_trigger =
1630 		(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1631 	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1632 		rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1633 
1634 	return 0;
1635 }
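/* Illustrative rearm sequence for the trigger bookkeeping above, assuming
 * nb_rx_desc = 512 and rx_free_thresh = 32: rx_free_trigger starts at 31
 * (set in ice_reset_rx_queue), the first refill writes 32 new buffers at
 * alloc_idx = 0 and advances the trigger to 63, then 95, and so on up to
 * 511; the next advance would reach 543 >= 512, so the trigger wraps back
 * to 31.
 */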
1636 
1637 static inline uint16_t
1638 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1639 {
1640 	struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1641 	uint16_t nb_rx = 0;
1642 	struct rte_eth_dev *dev;
1643 
1644 	if (!nb_pkts)
1645 		return 0;
1646 
1647 	if (rxq->rx_nb_avail)
1648 		return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1649 
1650 	nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1651 	rxq->rx_next_avail = 0;
1652 	rxq->rx_nb_avail = nb_rx;
1653 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1654 
1655 	if (rxq->rx_tail > rxq->rx_free_trigger) {
1656 		if (ice_rx_alloc_bufs(rxq) != 0) {
1657 			uint16_t i, j;
1658 
1659 			dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1660 			dev->data->rx_mbuf_alloc_failed +=
1661 				rxq->rx_free_thresh;
1662 			PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1663 				   "port_id=%u, queue_id=%u",
1664 				   rxq->port_id, rxq->queue_id);
1665 			rxq->rx_nb_avail = 0;
1666 			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1667 			for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1668 				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1669 
1670 			return 0;
1671 		}
1672 	}
1673 
1674 	if (rxq->rx_tail >= rxq->nb_rx_desc)
1675 		rxq->rx_tail = 0;
1676 
1677 	if (rxq->rx_nb_avail)
1678 		return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1679 
1680 	return 0;
1681 }
1682 
1683 static uint16_t
1684 ice_recv_pkts_bulk_alloc(void *rx_queue,
1685 			 struct rte_mbuf **rx_pkts,
1686 			 uint16_t nb_pkts)
1687 {
1688 	uint16_t nb_rx = 0;
1689 	uint16_t n;
1690 	uint16_t count;
1691 
1692 	if (unlikely(nb_pkts == 0))
1693 		return nb_rx;
1694 
1695 	if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1696 		return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1697 
1698 	while (nb_pkts) {
1699 		n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1700 		count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1701 		nb_rx = (uint16_t)(nb_rx + count);
1702 		nb_pkts = (uint16_t)(nb_pkts - count);
1703 		if (count < n)
1704 			break;
1705 	}
1706 
1707 	return nb_rx;
1708 }
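/*
 * Illustrative example (assuming ICE_RX_MAX_BURST == 32): a request for 100
 * packets is served in chunks of 32, 32, 32 and 4, and the loop stops early
 * as soon as one chunk returns fewer packets than requested.
 */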
1709 
1710 static uint16_t
1711 ice_recv_scattered_pkts(void *rx_queue,
1712 			struct rte_mbuf **rx_pkts,
1713 			uint16_t nb_pkts)
1714 {
1715 	struct ice_rx_queue *rxq = rx_queue;
1716 	volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1717 	volatile union ice_rx_flex_desc *rxdp;
1718 	union ice_rx_flex_desc rxd;
1719 	struct ice_rx_entry *sw_ring = rxq->sw_ring;
1720 	struct ice_rx_entry *rxe;
1721 	struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1722 	struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1723 	struct rte_mbuf *nmb; /* new allocated mbuf */
1724 	struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1725 	uint16_t rx_id = rxq->rx_tail;
1726 	uint16_t nb_rx = 0;
1727 	uint16_t nb_hold = 0;
1728 	uint16_t rx_packet_len;
1729 	uint16_t rx_stat_err0;
1730 	uint64_t dma_addr;
1731 	uint64_t pkt_flags;
1732 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1733 	struct rte_eth_dev *dev;
1734 
1735 	while (nb_rx < nb_pkts) {
1736 		rxdp = &rx_ring[rx_id];
1737 		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1738 
1739 		/* Check the DD bit first */
1740 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1741 			break;
1742 
1743 		/* allocate mbuf */
1744 		nmb = rte_mbuf_raw_alloc(rxq->mp);
1745 		if (unlikely(!nmb)) {
1746 			dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1747 			dev->data->rx_mbuf_alloc_failed++;
1748 			break;
1749 		}
1750 		rxd = *rxdp; /* copy descriptor in ring to temp variable */
1751 
1752 		nb_hold++;
1753 		rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1754 		rx_id++;
1755 		if (unlikely(rx_id == rxq->nb_rx_desc))
1756 			rx_id = 0;
1757 
1758 		/* Prefetch next mbuf */
1759 		rte_prefetch0(sw_ring[rx_id].mbuf);
1760 
1761 		/**
1762 		 * When next RX descriptor is on a cache line boundary,
1763 		 * prefetch the next 4 RX descriptors and next 8 pointers
1764 		 * to mbufs.
1765 		 */
1766 		if ((rx_id & 0x3) == 0) {
1767 			rte_prefetch0(&rx_ring[rx_id]);
1768 			rte_prefetch0(&sw_ring[rx_id]);
1769 		}
1770 
1771 		rxm = rxe->mbuf;
1772 		rxe->mbuf = nmb;
1773 		dma_addr =
1774 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1775 
1776 		/* Set data buffer address and data length of the mbuf */
1777 		rxdp->read.hdr_addr = 0;
1778 		rxdp->read.pkt_addr = dma_addr;
1779 		rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1780 				ICE_RX_FLX_DESC_PKT_LEN_M;
1781 		rxm->data_len = rx_packet_len;
1782 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1783 
1784 		/**
1785 		 * If this is the first buffer of the received packet, set the
1786 		 * pointer to the first mbuf of the packet and initialize its
1787 		 * context. Otherwise, update the total length and the number
1788 		 * of segments of the current scattered packet, and update the
1789 		 * pointer to the last mbuf of the current packet.
1790 		 */
1791 		if (!first_seg) {
1792 			first_seg = rxm;
1793 			first_seg->nb_segs = 1;
1794 			first_seg->pkt_len = rx_packet_len;
1795 		} else {
1796 			first_seg->pkt_len =
1797 				(uint16_t)(first_seg->pkt_len +
1798 					   rx_packet_len);
1799 			first_seg->nb_segs++;
1800 			last_seg->next = rxm;
1801 		}
1802 
1803 		/**
1804 		 * If this is not the last buffer of the received packet,
1805 		 * update the pointer to the last mbuf of the current scattered
1806 		 * packet and continue to parse the RX ring.
1807 		 */
1808 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1809 			last_seg = rxm;
1810 			continue;
1811 		}
1812 
1813 		/**
1814 		 * This is the last buffer of the received packet. If the CRC
1815 		 * is not stripped by the hardware:
1816 		 *  - Subtract the CRC length from the total packet length.
1817 		 *  - If the last buffer only contains the whole CRC or a part
1818 		 *  of it, free the mbuf associated to the last buffer. If part
1819 		 *  of the CRC is also contained in the previous mbuf, subtract
1820 		 *  the length of that CRC part from the data length of the
1821 		 *  previous mbuf.
1822 		 */
1823 		rxm->next = NULL;
1824 		if (unlikely(rxq->crc_len > 0)) {
1825 			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1826 			if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1827 				rte_pktmbuf_free_seg(rxm);
1828 				first_seg->nb_segs--;
1829 				last_seg->data_len =
1830 					(uint16_t)(last_seg->data_len -
1831 					(RTE_ETHER_CRC_LEN - rx_packet_len));
1832 				last_seg->next = NULL;
1833 			} else
1834 				rxm->data_len = (uint16_t)(rx_packet_len -
1835 							   RTE_ETHER_CRC_LEN);
1836 		}
1837 
1838 		first_seg->port = rxq->port_id;
1839 		first_seg->ol_flags = 0;
1840 		first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1841 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1842 		ice_rxd_to_vlan_tci(first_seg, &rxd);
1843 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1844 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1845 		first_seg->ol_flags |= pkt_flags;
1846 		/* Prefetch data of first segment, if configured to do so. */
1847 		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1848 					  first_seg->data_off));
1849 		rx_pkts[nb_rx++] = first_seg;
1850 		first_seg = NULL;
1851 	}
1852 
1853 	/* Record index of the next RX descriptor to probe. */
1854 	rxq->rx_tail = rx_id;
1855 	rxq->pkt_first_seg = first_seg;
1856 	rxq->pkt_last_seg = last_seg;
1857 
1858 	/**
1859 	 * If the number of free RX descriptors is greater than the RX free
1860 	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1861 	 * register. Update the RDT with the value of the last processed RX
1862 	 * descriptor minus 1, to guarantee that the RDT register is never
1863 	 * equal to the RDH register, which creates a "full" ring situation
1864 	 * from the hardware point of view.
1865 	 */
1866 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1867 	if (nb_hold > rxq->rx_free_thresh) {
1868 		rx_id = (uint16_t)(rx_id == 0 ?
1869 				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
1870 		/* write TAIL register */
1871 		ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1872 		nb_hold = 0;
1873 	}
1874 	rxq->nb_rx_hold = nb_hold;
1875 
1876 	/* return received packet in the burst */
1877 	return nb_rx;
1878 }
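/*
 * Illustrative example (assuming CRC stripping is disabled, so
 * crc_len == RTE_ETHER_CRC_LEN == 4): if the last buffer of a scattered
 * packet holds only 2 bytes, i.e. nothing but part of the CRC, that mbuf is
 * freed, nb_segs is decremented and the remaining 2 CRC bytes are trimmed
 * from the data_len of the previous segment, so the assembled chain never
 * hands CRC bytes to the application.
 */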
1879 
1880 const uint32_t *
1881 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1882 {
1883 	struct ice_adapter *ad =
1884 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1885 	const uint32_t *ptypes;
1886 
1887 	static const uint32_t ptypes_os[] = {
1888 		/* refers to ice_get_default_pkt_type() */
1889 		RTE_PTYPE_L2_ETHER,
1890 		RTE_PTYPE_L2_ETHER_TIMESYNC,
1891 		RTE_PTYPE_L2_ETHER_LLDP,
1892 		RTE_PTYPE_L2_ETHER_ARP,
1893 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1894 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1895 		RTE_PTYPE_L4_FRAG,
1896 		RTE_PTYPE_L4_ICMP,
1897 		RTE_PTYPE_L4_NONFRAG,
1898 		RTE_PTYPE_L4_SCTP,
1899 		RTE_PTYPE_L4_TCP,
1900 		RTE_PTYPE_L4_UDP,
1901 		RTE_PTYPE_TUNNEL_GRENAT,
1902 		RTE_PTYPE_TUNNEL_IP,
1903 		RTE_PTYPE_INNER_L2_ETHER,
1904 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1905 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1906 		RTE_PTYPE_INNER_L4_FRAG,
1907 		RTE_PTYPE_INNER_L4_ICMP,
1908 		RTE_PTYPE_INNER_L4_NONFRAG,
1909 		RTE_PTYPE_INNER_L4_SCTP,
1910 		RTE_PTYPE_INNER_L4_TCP,
1911 		RTE_PTYPE_INNER_L4_UDP,
1912 		RTE_PTYPE_UNKNOWN
1913 	};
1914 
1915 	static const uint32_t ptypes_comms[] = {
1916 		/* refers to ice_get_default_pkt_type() */
1917 		RTE_PTYPE_L2_ETHER,
1918 		RTE_PTYPE_L2_ETHER_TIMESYNC,
1919 		RTE_PTYPE_L2_ETHER_LLDP,
1920 		RTE_PTYPE_L2_ETHER_ARP,
1921 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1922 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1923 		RTE_PTYPE_L4_FRAG,
1924 		RTE_PTYPE_L4_ICMP,
1925 		RTE_PTYPE_L4_NONFRAG,
1926 		RTE_PTYPE_L4_SCTP,
1927 		RTE_PTYPE_L4_TCP,
1928 		RTE_PTYPE_L4_UDP,
1929 		RTE_PTYPE_TUNNEL_GRENAT,
1930 		RTE_PTYPE_TUNNEL_IP,
1931 		RTE_PTYPE_INNER_L2_ETHER,
1932 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1933 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1934 		RTE_PTYPE_INNER_L4_FRAG,
1935 		RTE_PTYPE_INNER_L4_ICMP,
1936 		RTE_PTYPE_INNER_L4_NONFRAG,
1937 		RTE_PTYPE_INNER_L4_SCTP,
1938 		RTE_PTYPE_INNER_L4_TCP,
1939 		RTE_PTYPE_INNER_L4_UDP,
1940 		RTE_PTYPE_TUNNEL_GTPC,
1941 		RTE_PTYPE_TUNNEL_GTPU,
1942 		RTE_PTYPE_L2_ETHER_PPPOE,
1943 		RTE_PTYPE_UNKNOWN
1944 	};
1945 
1946 	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1947 		ptypes = ptypes_comms;
1948 	else
1949 		ptypes = ptypes_os;
1950 
1951 	if (dev->rx_pkt_burst == ice_recv_pkts ||
1952 	    dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1953 	    dev->rx_pkt_burst == ice_recv_scattered_pkts)
1954 		return ptypes;
1955 
1956 #ifdef RTE_ARCH_X86
1957 	if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1958 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1959 #ifdef CC_AVX512_SUPPORT
1960 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
1961 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
1962 #endif
1963 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
1964 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
1965 		return ptypes;
1966 #endif
1967 
1968 	return NULL;
1969 }
1970 
1971 int
1972 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
1973 {
1974 	volatile union ice_rx_flex_desc *rxdp;
1975 	struct ice_rx_queue *rxq = rx_queue;
1976 	uint32_t desc;
1977 
1978 	if (unlikely(offset >= rxq->nb_rx_desc))
1979 		return -EINVAL;
1980 
1981 	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1982 		return RTE_ETH_RX_DESC_UNAVAIL;
1983 
1984 	desc = rxq->rx_tail + offset;
1985 	if (desc >= rxq->nb_rx_desc)
1986 		desc -= rxq->nb_rx_desc;
1987 
1988 	rxdp = &rxq->rx_ring[desc];
1989 	if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1990 	    (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
1991 		return RTE_ETH_RX_DESC_DONE;
1992 
1993 	return RTE_ETH_RX_DESC_AVAIL;
1994 }
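/*
 * Illustrative example (values assumed): the offset is relative to rx_tail
 * and wraps at the ring size. With nb_rx_desc = 512, rx_tail = 500 and
 * offset = 20, the probed descriptor is (500 + 20) - 512 = 8. Offsets at or
 * beyond nb_rx_desc - nb_rx_hold report RTE_ETH_RX_DESC_UNAVAIL, since those
 * descriptors have not yet been handed back to the hardware.
 */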
1995 
1996 int
1997 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
1998 {
1999 	struct ice_tx_queue *txq = tx_queue;
2000 	volatile uint64_t *status;
2001 	uint64_t mask, expect;
2002 	uint32_t desc;
2003 
2004 	if (unlikely(offset >= txq->nb_tx_desc))
2005 		return -EINVAL;
2006 
2007 	desc = txq->tx_tail + offset;
2008 	/* go to next desc that has the RS bit */
2009 	desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2010 		txq->tx_rs_thresh;
2011 	if (desc >= txq->nb_tx_desc) {
2012 		desc -= txq->nb_tx_desc;
2013 		if (desc >= txq->nb_tx_desc)
2014 			desc -= txq->nb_tx_desc;
2015 	}
2016 
2017 	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2018 	mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2019 	expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2020 				  ICE_TXD_QW1_DTYPE_S);
2021 	if ((*status & mask) == expect)
2022 		return RTE_ETH_TX_DESC_DONE;
2023 
2024 	return RTE_ETH_TX_DESC_FULL;
2025 }
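/*
 * Illustrative example (values assumed): the queried position is rounded up
 * to the next descriptor carrying the RS bit, because only those descriptors
 * report completion. With tx_tail = 10, offset = 5 and tx_rs_thresh = 32 the
 * check lands on descriptor 32; if its DTYPE field reads DESC_DONE, the
 * whole batch up to that RS descriptor has completed.
 */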
2026 
2027 void
2028 ice_free_queues(struct rte_eth_dev *dev)
2029 {
2030 	uint16_t i;
2031 
2032 	PMD_INIT_FUNC_TRACE();
2033 
2034 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2035 		if (!dev->data->rx_queues[i])
2036 			continue;
2037 		ice_rx_queue_release(dev->data->rx_queues[i]);
2038 		dev->data->rx_queues[i] = NULL;
2039 		rte_eth_dma_zone_free(dev, "rx_ring", i);
2040 	}
2041 	dev->data->nb_rx_queues = 0;
2042 
2043 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2044 		if (!dev->data->tx_queues[i])
2045 			continue;
2046 		ice_tx_queue_release(dev->data->tx_queues[i]);
2047 		dev->data->tx_queues[i] = NULL;
2048 		rte_eth_dma_zone_free(dev, "tx_ring", i);
2049 	}
2050 	dev->data->nb_tx_queues = 0;
2051 }
2052 
2053 #define ICE_FDIR_NUM_TX_DESC  ICE_MIN_RING_DESC
2054 #define ICE_FDIR_NUM_RX_DESC  ICE_MIN_RING_DESC
2055 
2056 int
2057 ice_fdir_setup_tx_resources(struct ice_pf *pf)
2058 {
2059 	struct ice_tx_queue *txq;
2060 	const struct rte_memzone *tz = NULL;
2061 	uint32_t ring_size;
2062 	struct rte_eth_dev *dev;
2063 
2064 	if (!pf) {
2065 		PMD_DRV_LOG(ERR, "PF is not available");
2066 		return -EINVAL;
2067 	}
2068 
2069 	dev = pf->adapter->eth_dev;
2070 
2071 	/* Allocate the TX queue data structure. */
2072 	txq = rte_zmalloc_socket("ice fdir tx queue",
2073 				 sizeof(struct ice_tx_queue),
2074 				 RTE_CACHE_LINE_SIZE,
2075 				 SOCKET_ID_ANY);
2076 	if (!txq) {
2077 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2078 			    "tx queue structure.");
2079 		return -ENOMEM;
2080 	}
2081 
2082 	/* Allocate TX hardware ring descriptors. */
2083 	ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2084 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2085 
2086 	tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2087 				      ICE_FDIR_QUEUE_ID, ring_size,
2088 				      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2089 	if (!tz) {
2090 		ice_tx_queue_release(txq);
2091 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2092 		return -ENOMEM;
2093 	}
2094 
2095 	txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2096 	txq->queue_id = ICE_FDIR_QUEUE_ID;
2097 	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2098 	txq->vsi = pf->fdir.fdir_vsi;
2099 
2100 	txq->tx_ring_dma = tz->iova;
2101 	txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2102 	/*
2103 	 * No need to allocate a software ring or reset it for the FDIR
2104 	 * program queue; just mark the queue as configured.
2105 	 */
2106 	txq->q_set = true;
2107 	pf->fdir.txq = txq;
2108 
2109 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2110 
2111 	return ICE_SUCCESS;
2112 }
2113 
2114 int
2115 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2116 {
2117 	struct ice_rx_queue *rxq;
2118 	const struct rte_memzone *rz = NULL;
2119 	uint32_t ring_size;
2120 	struct rte_eth_dev *dev;
2121 
2122 	if (!pf) {
2123 		PMD_DRV_LOG(ERR, "PF is not available");
2124 		return -EINVAL;
2125 	}
2126 
2127 	dev = pf->adapter->eth_dev;
2128 
2129 	/* Allocate the RX queue data structure. */
2130 	rxq = rte_zmalloc_socket("ice fdir rx queue",
2131 				 sizeof(struct ice_rx_queue),
2132 				 RTE_CACHE_LINE_SIZE,
2133 				 SOCKET_ID_ANY);
2134 	if (!rxq) {
2135 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2136 			    "rx queue structure.");
2137 		return -ENOMEM;
2138 	}
2139 
2140 	/* Allocate RX hardware ring descriptors. */
2141 	ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2142 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2143 
2144 	rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2145 				      ICE_FDIR_QUEUE_ID, ring_size,
2146 				      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2147 	if (!rz) {
2148 		ice_rx_queue_release(rxq);
2149 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2150 		return -ENOMEM;
2151 	}
2152 
2153 	rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2154 	rxq->queue_id = ICE_FDIR_QUEUE_ID;
2155 	rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2156 	rxq->vsi = pf->fdir.fdir_vsi;
2157 
2158 	rxq->rx_ring_dma = rz->iova;
2159 	memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2160 	       sizeof(union ice_32byte_rx_desc));
2161 	rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2162 
2163 	/*
2164 	 * No need to allocate a software ring or reset it for the FDIR
2165 	 * Rx queue; just mark the queue as configured.
2166 	 */
2167 	rxq->q_set = true;
2168 	pf->fdir.rxq = rxq;
2169 
2170 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2171 
2172 	return ICE_SUCCESS;
2173 }
2174 
2175 uint16_t
2176 ice_recv_pkts(void *rx_queue,
2177 	      struct rte_mbuf **rx_pkts,
2178 	      uint16_t nb_pkts)
2179 {
2180 	struct ice_rx_queue *rxq = rx_queue;
2181 	volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2182 	volatile union ice_rx_flex_desc *rxdp;
2183 	union ice_rx_flex_desc rxd;
2184 	struct ice_rx_entry *sw_ring = rxq->sw_ring;
2185 	struct ice_rx_entry *rxe;
2186 	struct rte_mbuf *nmb; /* new allocated mbuf */
2187 	struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2188 	uint16_t rx_id = rxq->rx_tail;
2189 	uint16_t nb_rx = 0;
2190 	uint16_t nb_hold = 0;
2191 	uint16_t rx_packet_len;
2192 	uint16_t rx_stat_err0;
2193 	uint64_t dma_addr;
2194 	uint64_t pkt_flags;
2195 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2196 	struct rte_eth_dev *dev;
2197 
2198 	while (nb_rx < nb_pkts) {
2199 		rxdp = &rx_ring[rx_id];
2200 		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2201 
2202 		/* Check the DD bit first */
2203 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2204 			break;
2205 
2206 		/* allocate mbuf */
2207 		nmb = rte_mbuf_raw_alloc(rxq->mp);
2208 		if (unlikely(!nmb)) {
2209 			dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
2210 			dev->data->rx_mbuf_alloc_failed++;
2211 			break;
2212 		}
2213 		rxd = *rxdp; /* copy descriptor in ring to temp variable */
2214 
2215 		nb_hold++;
2216 		rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2217 		rx_id++;
2218 		if (unlikely(rx_id == rxq->nb_rx_desc))
2219 			rx_id = 0;
2220 		rxm = rxe->mbuf;
2221 		rxe->mbuf = nmb;
2222 		dma_addr =
2223 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2224 
2225 		/**
2226 		 * fill the read format of the descriptor with the physical
2227 		 * address of the newly allocated mbuf: nmb
2228 		 */
2229 		rxdp->read.hdr_addr = 0;
2230 		rxdp->read.pkt_addr = dma_addr;
2231 
2232 		/* calculate rx_packet_len of the received pkt */
2233 		rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2234 				 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2235 
2236 		/* fill old mbuf with received descriptor: rxd */
2237 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
2238 		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2239 		rxm->nb_segs = 1;
2240 		rxm->next = NULL;
2241 		rxm->pkt_len = rx_packet_len;
2242 		rxm->data_len = rx_packet_len;
2243 		rxm->port = rxq->port_id;
2244 		rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2245 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2246 		ice_rxd_to_vlan_tci(rxm, &rxd);
2247 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
2248 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2249 		rxm->ol_flags |= pkt_flags;
2250 		/* copy old mbuf to rx_pkts */
2251 		rx_pkts[nb_rx++] = rxm;
2252 	}
2253 	rxq->rx_tail = rx_id;
2254 	/**
2255 	 * If the number of free RX descriptors is greater than the RX free
2256 	 * threshold of the queue, advance the receive tail register of the queue.
2257 	 * Update that register with the value of the last processed RX
2258 	 * descriptor minus 1.
2259 	 */
2260 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2261 	if (nb_hold > rxq->rx_free_thresh) {
2262 		rx_id = (uint16_t)(rx_id == 0 ?
2263 				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
2264 		/* write TAIL register */
2265 		ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2266 		nb_hold = 0;
2267 	}
2268 	rxq->nb_rx_hold = nb_hold;
2269 
2270 	/* return received packet in the burst */
2271 	return nb_rx;
2272 }
2273 
2274 static inline void
2275 ice_parse_tunneling_params(uint64_t ol_flags,
2276 			    union ice_tx_offload tx_offload,
2277 			    uint32_t *cd_tunneling)
2278 {
2279 	/* EIPT: External (outer) IP header type */
2280 	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2281 		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2282 	else if (ol_flags & PKT_TX_OUTER_IPV4)
2283 		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2284 	else if (ol_flags & PKT_TX_OUTER_IPV6)
2285 		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2286 
2287 	/* EIPLEN: External (outer) IP header length, in DWords */
2288 	*cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2289 		ICE_TXD_CTX_QW0_EIPLEN_S;
2290 
2291 	/* L4TUNT: L4 Tunneling Type */
2292 	switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2293 	case PKT_TX_TUNNEL_IPIP:
2294 		/* for non UDP / GRE tunneling, set to 00b */
2295 		break;
2296 	case PKT_TX_TUNNEL_VXLAN:
2297 	case PKT_TX_TUNNEL_GTP:
2298 	case PKT_TX_TUNNEL_GENEVE:
2299 		*cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2300 		break;
2301 	case PKT_TX_TUNNEL_GRE:
2302 		*cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2303 		break;
2304 	default:
2305 		PMD_TX_LOG(ERR, "Tunnel type not supported");
2306 		return;
2307 	}
2308 
2309 	/* L4TUNLEN: L4 Tunneling Length, in Words
2310 	 *
2311 	 * We depend on the app to set rte_mbuf.l2_len correctly.
2312 	 * For IP in GRE it should be set to the length of the GRE
2313 	 * header;
2314 	 * For MAC in GRE or MAC in UDP it should be set to the length
2315 	 * of the GRE or UDP headers plus the inner MAC header, up to and
2316 	 * including its last Ethertype.
2317 	 * If MPLS labels exist, they should be included as well.
2318 	 */
2319 	*cd_tunneling |= (tx_offload.l2_len >> 1) <<
2320 		ICE_TXD_CTX_QW0_NATLEN_S;
2321 
2322 	if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
2323 	    (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2324 	    (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2325 		*cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2326 }
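/*
 * Illustrative example (header sizes assumed): EIPLEN is programmed in
 * 4-byte DWords and L4TUNLEN in 2-byte Words. For a VXLAN packet with a
 * 20-byte outer IPv4 header, EIPLEN = 20 >> 2 = 5; with l2_len covering
 * outer UDP (8) + VXLAN (8) + inner Ethernet (14) = 30 bytes,
 * L4TUNLEN = 30 >> 1 = 15.
 */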
2327 
2328 static inline void
2329 ice_txd_enable_checksum(uint64_t ol_flags,
2330 			uint32_t *td_cmd,
2331 			uint32_t *td_offset,
2332 			union ice_tx_offload tx_offload)
2333 {
2334 	/* Set MACLEN */
2335 	if (ol_flags & PKT_TX_TUNNEL_MASK)
2336 		*td_offset |= (tx_offload.outer_l2_len >> 1)
2337 			<< ICE_TX_DESC_LEN_MACLEN_S;
2338 	else
2339 		*td_offset |= (tx_offload.l2_len >> 1)
2340 			<< ICE_TX_DESC_LEN_MACLEN_S;
2341 
2342 	/* Enable L3 checksum offloads */
2343 	if (ol_flags & PKT_TX_IP_CKSUM) {
2344 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2345 		*td_offset |= (tx_offload.l3_len >> 2) <<
2346 			      ICE_TX_DESC_LEN_IPLEN_S;
2347 	} else if (ol_flags & PKT_TX_IPV4) {
2348 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2349 		*td_offset |= (tx_offload.l3_len >> 2) <<
2350 			      ICE_TX_DESC_LEN_IPLEN_S;
2351 	} else if (ol_flags & PKT_TX_IPV6) {
2352 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2353 		*td_offset |= (tx_offload.l3_len >> 2) <<
2354 			      ICE_TX_DESC_LEN_IPLEN_S;
2355 	}
2356 
2357 	if (ol_flags & PKT_TX_TCP_SEG) {
2358 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2359 		*td_offset |= (tx_offload.l4_len >> 2) <<
2360 			      ICE_TX_DESC_LEN_L4_LEN_S;
2361 		return;
2362 	}
2363 
2364 	/* Enable L4 checksum offloads */
2365 	switch (ol_flags & PKT_TX_L4_MASK) {
2366 	case PKT_TX_TCP_CKSUM:
2367 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2368 		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2369 			      ICE_TX_DESC_LEN_L4_LEN_S;
2370 		break;
2371 	case PKT_TX_SCTP_CKSUM:
2372 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2373 		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2374 			      ICE_TX_DESC_LEN_L4_LEN_S;
2375 		break;
2376 	case PKT_TX_UDP_CKSUM:
2377 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2378 		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2379 			      ICE_TX_DESC_LEN_L4_LEN_S;
2380 		break;
2381 	default:
2382 		break;
2383 	}
2384 }
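/*
 * Illustrative example (header sizes assumed): the offset fields use
 * different units: MACLEN is in 2-byte Words, IPLEN and L4LEN in 4-byte
 * DWords. For a plain TCP/IPv4 packet with l2_len = 14, l3_len = 20 and a
 * 20-byte TCP header, the driver programs MACLEN = 7, IPLEN = 5 and
 * L4LEN = 5.
 */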
2385 
2386 static inline int
2387 ice_xmit_cleanup(struct ice_tx_queue *txq)
2388 {
2389 	struct ice_tx_entry *sw_ring = txq->sw_ring;
2390 	volatile struct ice_tx_desc *txd = txq->tx_ring;
2391 	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2392 	uint16_t nb_tx_desc = txq->nb_tx_desc;
2393 	uint16_t desc_to_clean_to;
2394 	uint16_t nb_tx_to_clean;
2395 
2396 	/* Determine the last descriptor needing to be cleaned */
2397 	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2398 	if (desc_to_clean_to >= nb_tx_desc)
2399 		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2400 
2401 	/* Check to make sure the last descriptor to clean is done */
2402 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2403 	if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2404 	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2405 		PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
2406 				"(port=%d queue=%d) value=0x%"PRIx64"\n",
2407 				desc_to_clean_to,
2408 				txq->port_id, txq->queue_id,
2409 				txd[desc_to_clean_to].cmd_type_offset_bsz);
2410 		/* Failed to clean any descriptors */
2411 		return -1;
2412 	}
2413 
2414 	/* Figure out how many descriptors will be cleaned */
2415 	if (last_desc_cleaned > desc_to_clean_to)
2416 		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2417 					    desc_to_clean_to);
2418 	else
2419 		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2420 					    last_desc_cleaned);
2421 
2422 	/* The last descriptor to clean is done, so that means all the
2423 	 * descriptors from the last descriptor that was cleaned
2424 	 * up to the last descriptor with the RS bit set
2425 	 * are done. Only reset the threshold descriptor.
2426 	 */
2427 	txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2428 
2429 	/* Update the txq to reflect the last descriptor that was cleaned */
2430 	txq->last_desc_cleaned = desc_to_clean_to;
2431 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2432 
2433 	return 0;
2434 }
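/*
 * Illustrative example (values assumed): cleanup advances tx_rs_thresh
 * descriptors at a time and only inspects the descriptor carrying the RS
 * bit. With nb_tx_desc = 512, tx_rs_thresh = 32, last_desc_cleaned = 31 and
 * last_id == 63 (no packet straddling the boundary), the candidate is
 * descriptor 63; if its DTYPE reads DESC_DONE, 32 descriptors are reclaimed
 * and nb_tx_free grows by 32.
 */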
2435 
2436 /* Construct the tx flags */
2437 static inline uint64_t
2438 ice_build_ctob(uint32_t td_cmd,
2439 	       uint32_t td_offset,
2440 	       uint16_t size,
2441 	       uint32_t td_tag)
2442 {
2443 	return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2444 				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2445 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2446 				((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2447 				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2448 }
2449 
2450 /* Check if the context descriptor is needed for TX offloading */
2451 static inline uint16_t
2452 ice_calc_context_desc(uint64_t flags)
2453 {
2454 	static uint64_t mask = PKT_TX_TCP_SEG |
2455 		PKT_TX_QINQ |
2456 		PKT_TX_OUTER_IP_CKSUM |
2457 		PKT_TX_TUNNEL_MASK;
2458 
2459 	return (flags & mask) ? 1 : 0;
2460 }
2461 
2462 /* set ice TSO context descriptor */
2463 static inline uint64_t
2464 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2465 {
2466 	uint64_t ctx_desc = 0;
2467 	uint32_t cd_cmd, hdr_len, cd_tso_len;
2468 
2469 	if (!tx_offload.l4_len) {
2470 		PMD_TX_LOG(DEBUG, "L4 length set to 0");
2471 		return ctx_desc;
2472 	}
2473 
2474 	hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2475 	hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2476 		   tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2477 
2478 	cd_cmd = ICE_TX_CTX_DESC_TSO;
2479 	cd_tso_len = mbuf->pkt_len - hdr_len;
2480 	ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2481 		    ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2482 		    ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
2483 
2484 	return ctx_desc;
2485 }
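/*
 * Illustrative example (non-tunneled case, header sizes assumed): with
 * l2_len = 14, l3_len = 20 and l4_len = 20, hdr_len = 54; for a 9014-byte
 * mbuf the TSO length written into the context descriptor is
 * 9014 - 54 = 8960 bytes, which the hardware slices into
 * ceil(8960 / tso_segsz) segments (7 segments for tso_segsz = 1460).
 */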
2486 
2487 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2488 #define ICE_MAX_DATA_PER_TXD \
2489 	(ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2490 /* Calculate the number of TX descriptors needed for each pkt */
2491 static inline uint16_t
2492 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2493 {
2494 	struct rte_mbuf *txd = tx_pkt;
2495 	uint16_t count = 0;
2496 
2497 	while (txd != NULL) {
2498 		count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2499 		txd = txd->next;
2500 	}
2501 
2502 	return count;
2503 }
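/*
 * Illustrative example: ICE_MAX_DATA_PER_TXD is 16383 bytes, so a TSO mbuf
 * chain with segment data lengths of 16000, 20000 and 1000 bytes needs
 * 1 + 2 + 1 = 4 data descriptors, whereas nb_segs alone would suggest only 3.
 */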
2504 
2505 uint16_t
2506 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2507 {
2508 	struct ice_tx_queue *txq;
2509 	volatile struct ice_tx_desc *tx_ring;
2510 	volatile struct ice_tx_desc *txd;
2511 	struct ice_tx_entry *sw_ring;
2512 	struct ice_tx_entry *txe, *txn;
2513 	struct rte_mbuf *tx_pkt;
2514 	struct rte_mbuf *m_seg;
2515 	uint32_t cd_tunneling_params;
2516 	uint16_t tx_id;
2517 	uint16_t nb_tx;
2518 	uint16_t nb_used;
2519 	uint16_t nb_ctx;
2520 	uint32_t td_cmd = 0;
2521 	uint32_t td_offset = 0;
2522 	uint32_t td_tag = 0;
2523 	uint16_t tx_last;
2524 	uint16_t slen;
2525 	uint64_t buf_dma_addr;
2526 	uint64_t ol_flags;
2527 	union ice_tx_offload tx_offload = {0};
2528 
2529 	txq = tx_queue;
2530 	sw_ring = txq->sw_ring;
2531 	tx_ring = txq->tx_ring;
2532 	tx_id = txq->tx_tail;
2533 	txe = &sw_ring[tx_id];
2534 
2535 	/* Check if the descriptor ring needs to be cleaned. */
2536 	if (txq->nb_tx_free < txq->tx_free_thresh)
2537 		(void)ice_xmit_cleanup(txq);
2538 
2539 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2540 		tx_pkt = *tx_pkts++;
2541 
2542 		td_cmd = 0;
2543 		td_tag = 0;
2544 		td_offset = 0;
2545 		ol_flags = tx_pkt->ol_flags;
2546 		tx_offload.l2_len = tx_pkt->l2_len;
2547 		tx_offload.l3_len = tx_pkt->l3_len;
2548 		tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2549 		tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2550 		tx_offload.l4_len = tx_pkt->l4_len;
2551 		tx_offload.tso_segsz = tx_pkt->tso_segsz;
2552 		/* Calculate the number of context descriptors needed. */
2553 		nb_ctx = ice_calc_context_desc(ol_flags);
2554 
2555 		/* The number of descriptors that must be allocated for
2556 		 * a packet equals the number of segments of that packet,
2557 		 * plus one context descriptor if needed.
2558 		 * Recalculate the number of needed Tx descriptors when TSO is
2559 		 * enabled, in case an mbuf data size exceeds the maximum data
2560 		 * size the hardware allows per Tx descriptor.
2561 		 */
2562 		if (ol_flags & PKT_TX_TCP_SEG)
2563 			nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2564 					     nb_ctx);
2565 		else
2566 			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2567 		tx_last = (uint16_t)(tx_id + nb_used - 1);
2568 
2569 		/* Circular ring */
2570 		if (tx_last >= txq->nb_tx_desc)
2571 			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2572 
2573 		if (nb_used > txq->nb_tx_free) {
2574 			if (ice_xmit_cleanup(txq) != 0) {
2575 				if (nb_tx == 0)
2576 					return 0;
2577 				goto end_of_tx;
2578 			}
2579 			if (unlikely(nb_used > txq->tx_rs_thresh)) {
2580 				while (nb_used > txq->nb_tx_free) {
2581 					if (ice_xmit_cleanup(txq) != 0) {
2582 						if (nb_tx == 0)
2583 							return 0;
2584 						goto end_of_tx;
2585 					}
2586 				}
2587 			}
2588 		}
2589 
2590 		/* Descriptor based VLAN insertion */
2591 		if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2592 			td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2593 			td_tag = tx_pkt->vlan_tci;
2594 		}
2595 
2596 		/* Fill in tunneling parameters if necessary */
2597 		cd_tunneling_params = 0;
2598 		if (ol_flags & PKT_TX_TUNNEL_MASK)
2599 			ice_parse_tunneling_params(ol_flags, tx_offload,
2600 						   &cd_tunneling_params);
2601 
2602 		/* Enable checksum offloading */
2603 		if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2604 			ice_txd_enable_checksum(ol_flags, &td_cmd,
2605 						&td_offset, tx_offload);
2606 
2607 		if (nb_ctx) {
2608 			/* Setup TX context descriptor if required */
2609 			volatile struct ice_tx_ctx_desc *ctx_txd =
2610 				(volatile struct ice_tx_ctx_desc *)
2611 					&tx_ring[tx_id];
2612 			uint16_t cd_l2tag2 = 0;
2613 			uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2614 
2615 			txn = &sw_ring[txe->next_id];
2616 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2617 			if (txe->mbuf) {
2618 				rte_pktmbuf_free_seg(txe->mbuf);
2619 				txe->mbuf = NULL;
2620 			}
2621 
2622 			if (ol_flags & PKT_TX_TCP_SEG)
2623 				cd_type_cmd_tso_mss |=
2624 					ice_set_tso_ctx(tx_pkt, tx_offload);
2625 
2626 			ctx_txd->tunneling_params =
2627 				rte_cpu_to_le_32(cd_tunneling_params);
2628 
2629 			/* TX context descriptor based double VLAN insert */
2630 			if (ol_flags & PKT_TX_QINQ) {
2631 				cd_l2tag2 = tx_pkt->vlan_tci_outer;
2632 				cd_type_cmd_tso_mss |=
2633 					((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2634 					 ICE_TXD_CTX_QW1_CMD_S);
2635 			}
2636 			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2637 			ctx_txd->qw1 =
2638 				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2639 
2640 			txe->last_id = tx_last;
2641 			tx_id = txe->next_id;
2642 			txe = txn;
2643 		}
2644 		m_seg = tx_pkt;
2645 
2646 		do {
2647 			txd = &tx_ring[tx_id];
2648 			txn = &sw_ring[txe->next_id];
2649 
2650 			if (txe->mbuf)
2651 				rte_pktmbuf_free_seg(txe->mbuf);
2652 			txe->mbuf = m_seg;
2653 
2654 			/* Setup TX Descriptor */
2655 			slen = m_seg->data_len;
2656 			buf_dma_addr = rte_mbuf_data_iova(m_seg);
2657 
2658 			while ((ol_flags & PKT_TX_TCP_SEG) &&
2659 				unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2660 				txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2661 				txd->cmd_type_offset_bsz =
2662 				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2663 				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2664 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2665 				((uint64_t)ICE_MAX_DATA_PER_TXD <<
2666 				 ICE_TXD_QW1_TX_BUF_SZ_S) |
2667 				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2668 
2669 				buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2670 				slen -= ICE_MAX_DATA_PER_TXD;
2671 
2672 				txe->last_id = tx_last;
2673 				tx_id = txe->next_id;
2674 				txe = txn;
2675 				txd = &tx_ring[tx_id];
2676 				txn = &sw_ring[txe->next_id];
2677 			}
2678 
2679 			txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2680 			txd->cmd_type_offset_bsz =
2681 				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2682 				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2683 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2684 				((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2685 				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2686 
2687 			txe->last_id = tx_last;
2688 			tx_id = txe->next_id;
2689 			txe = txn;
2690 			m_seg = m_seg->next;
2691 		} while (m_seg);
2692 
2693 		/* fill the last descriptor with End of Packet (EOP) bit */
2694 		td_cmd |= ICE_TX_DESC_CMD_EOP;
2695 		txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2696 		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2697 
2698 		/* set RS bit on the last descriptor of one packet */
2699 		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2700 			PMD_TX_FREE_LOG(DEBUG,
2701 					"Setting RS bit on TXD id="
2702 					"%4u (port=%d queue=%d)",
2703 					tx_last, txq->port_id, txq->queue_id);
2704 
2705 			td_cmd |= ICE_TX_DESC_CMD_RS;
2706 
2707 			/* Update txq RS bit counters */
2708 			txq->nb_tx_used = 0;
2709 		}
2710 		txd->cmd_type_offset_bsz |=
2711 			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2712 					 ICE_TXD_QW1_CMD_S);
2713 	}
2714 end_of_tx:
2715 	/* update Tail register */
2716 	ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2717 	txq->tx_tail = tx_id;
2718 
2719 	return nb_tx;
2720 }
2721 
2722 static __rte_always_inline int
2723 ice_tx_free_bufs(struct ice_tx_queue *txq)
2724 {
2725 	struct ice_tx_entry *txep;
2726 	uint16_t i;
2727 
2728 	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2729 	     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2730 	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2731 		return 0;
2732 
2733 	txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2734 
2735 	for (i = 0; i < txq->tx_rs_thresh; i++)
2736 		rte_prefetch0((txep + i)->mbuf);
2737 
2738 	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2739 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2740 			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2741 			txep->mbuf = NULL;
2742 		}
2743 	} else {
2744 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2745 			rte_pktmbuf_free_seg(txep->mbuf);
2746 			txep->mbuf = NULL;
2747 		}
2748 	}
2749 
2750 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2751 	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2752 	if (txq->tx_next_dd >= txq->nb_tx_desc)
2753 		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2754 
2755 	return txq->tx_rs_thresh;
2756 }
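/*
 * Note on the two free paths above: with DEV_TX_OFFLOAD_MBUF_FAST_FREE the
 * mbufs are returned straight to their mempool, which is only safe when the
 * application guarantees direct, non-cloned mbufs coming from a single pool
 * per queue; otherwise rte_pktmbuf_free_seg() handles reference counts and
 * indirect mbufs at some extra cost.
 */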
2757 
2758 static int
2759 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2760 			uint32_t free_cnt)
2761 {
2762 	struct ice_tx_entry *swr_ring = txq->sw_ring;
2763 	uint16_t i, tx_last, tx_id;
2764 	uint16_t nb_tx_free_last;
2765 	uint16_t nb_tx_to_clean;
2766 	uint32_t pkt_cnt;
2767 
2768 	/* Start freeing mbufs from the entry after tx_tail */
2769 	tx_last = txq->tx_tail;
2770 	tx_id  = swr_ring[tx_last].next_id;
2771 
2772 	if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2773 		return 0;
2774 
2775 	nb_tx_to_clean = txq->nb_tx_free;
2776 	nb_tx_free_last = txq->nb_tx_free;
2777 	if (!free_cnt)
2778 		free_cnt = txq->nb_tx_desc;
2779 
2780 	/* Loop through swr_ring to count the amount of
2781 	 * freeable mbufs and packets.
2782 	 */
2783 	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2784 		for (i = 0; i < nb_tx_to_clean &&
2785 			pkt_cnt < free_cnt &&
2786 			tx_id != tx_last; i++) {
2787 			if (swr_ring[tx_id].mbuf != NULL) {
2788 				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2789 				swr_ring[tx_id].mbuf = NULL;
2790 
2791 				/*
2792 				 * last segment in the packet,
2793 				 * increment packet count
2794 				 */
2795 				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2796 			}
2797 
2798 			tx_id = swr_ring[tx_id].next_id;
2799 		}
2800 
2801 		if (txq->tx_rs_thresh > txq->nb_tx_desc -
2802 			txq->nb_tx_free || tx_id == tx_last)
2803 			break;
2804 
2805 		if (pkt_cnt < free_cnt) {
2806 			if (ice_xmit_cleanup(txq))
2807 				break;
2808 
2809 			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2810 			nb_tx_free_last = txq->nb_tx_free;
2811 		}
2812 	}
2813 
2814 	return (int)pkt_cnt;
2815 }
2816 
2817 #ifdef RTE_ARCH_X86
2818 static int
2819 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2820 			uint32_t free_cnt __rte_unused)
2821 {
2822 	return -ENOTSUP;
2823 }
2824 #endif
2825 
2826 static int
2827 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2828 			uint32_t free_cnt)
2829 {
2830 	int i, n, cnt;
2831 
2832 	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2833 		free_cnt = txq->nb_tx_desc;
2834 
2835 	cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2836 
2837 	for (i = 0; i < cnt; i += n) {
2838 		if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2839 			break;
2840 
2841 		n = ice_tx_free_bufs(txq);
2842 
2843 		if (n == 0)
2844 			break;
2845 	}
2846 
2847 	return i;
2848 }
2849 
2850 int
2851 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2852 {
2853 	struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2854 	struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2855 	struct ice_adapter *ad =
2856 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2857 
2858 #ifdef RTE_ARCH_X86
2859 	if (ad->tx_vec_allowed)
2860 		return ice_tx_done_cleanup_vec(q, free_cnt);
2861 #endif
2862 	if (ad->tx_simple_allowed)
2863 		return ice_tx_done_cleanup_simple(q, free_cnt);
2864 	else
2865 		return ice_tx_done_cleanup_full(q, free_cnt);
2866 }
2867 
2868 /* Populate 4 descriptors with data from 4 mbufs */
2869 static inline void
2870 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2871 {
2872 	uint64_t dma_addr;
2873 	uint32_t i;
2874 
2875 	for (i = 0; i < 4; i++, txdp++, pkts++) {
2876 		dma_addr = rte_mbuf_data_iova(*pkts);
2877 		txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2878 		txdp->cmd_type_offset_bsz =
2879 			ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2880 				       (*pkts)->data_len, 0);
2881 	}
2882 }
2883 
2884 /* Populate 1 descriptor with data from 1 mbuf */
2885 static inline void
2886 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2887 {
2888 	uint64_t dma_addr;
2889 
2890 	dma_addr = rte_mbuf_data_iova(*pkts);
2891 	txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2892 	txdp->cmd_type_offset_bsz =
2893 		ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2894 			       (*pkts)->data_len, 0);
2895 }
2896 
2897 static inline void
2898 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2899 		    uint16_t nb_pkts)
2900 {
2901 	volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2902 	struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2903 	const int N_PER_LOOP = 4;
2904 	const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2905 	int mainpart, leftover;
2906 	int i, j;
2907 
2908 	/**
2909 	 * Process most of the packets in chunks of N pkts.  Any
2910 	 * leftover packets will get processed one at a time.
2911 	 */
2912 	mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2913 	leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
2914 	for (i = 0; i < mainpart; i += N_PER_LOOP) {
2915 		/* Copy N mbuf pointers to the S/W ring */
2916 		for (j = 0; j < N_PER_LOOP; ++j)
2917 			(txep + i + j)->mbuf = *(pkts + i + j);
2918 		tx4(txdp + i, pkts + i);
2919 	}
2920 
2921 	if (unlikely(leftover > 0)) {
2922 		for (i = 0; i < leftover; ++i) {
2923 			(txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2924 			tx1(txdp + mainpart + i, pkts + mainpart + i);
2925 		}
2926 	}
2927 }
2928 
2929 static inline uint16_t
2930 tx_xmit_pkts(struct ice_tx_queue *txq,
2931 	     struct rte_mbuf **tx_pkts,
2932 	     uint16_t nb_pkts)
2933 {
2934 	volatile struct ice_tx_desc *txr = txq->tx_ring;
2935 	uint16_t n = 0;
2936 
2937 	/**
2938 	 * Begin scanning the H/W ring for done descriptors when the number
2939 	 * of available descriptors drops below tx_free_thresh. For each done
2940 	 * descriptor, free the associated buffer.
2941 	 */
2942 	if (txq->nb_tx_free < txq->tx_free_thresh)
2943 		ice_tx_free_bufs(txq);
2944 
2945 	/* Use available descriptors only */
2946 	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2947 	if (unlikely(!nb_pkts))
2948 		return 0;
2949 
2950 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2951 	if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2952 		n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2953 		ice_tx_fill_hw_ring(txq, tx_pkts, n);
2954 		txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2955 			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2956 					 ICE_TXD_QW1_CMD_S);
2957 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2958 		txq->tx_tail = 0;
2959 	}
2960 
2961 	/* Fill hardware descriptor ring with mbuf data */
2962 	ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2963 	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2964 
2965 	/* Determine whether the RS bit needs to be set */
2966 	if (txq->tx_tail > txq->tx_next_rs) {
2967 		txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2968 			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2969 					 ICE_TXD_QW1_CMD_S);
2970 		txq->tx_next_rs =
2971 			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2972 		if (txq->tx_next_rs >= txq->nb_tx_desc)
2973 			txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2974 	}
2975 
2976 	if (txq->tx_tail >= txq->nb_tx_desc)
2977 		txq->tx_tail = 0;
2978 
2979 	/* Update the tx tail register */
2980 	ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
2981 
2982 	return nb_pkts;
2983 }
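/*
 * Illustrative example (values assumed): the fill is split when it would
 * cross the end of the ring. With nb_tx_desc = 512, tx_tail = 500 and
 * nb_pkts = 20, descriptors 500..511 are filled first, the RS bit is set at
 * tx_next_rs, and the remaining 8 packets land in descriptors 0..7, leaving
 * tx_tail = 8.
 */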
2984 
2985 static uint16_t
2986 ice_xmit_pkts_simple(void *tx_queue,
2987 		     struct rte_mbuf **tx_pkts,
2988 		     uint16_t nb_pkts)
2989 {
2990 	uint16_t nb_tx = 0;
2991 
2992 	if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2993 		return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2994 				    tx_pkts, nb_pkts);
2995 
2996 	while (nb_pkts) {
2997 		uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2998 						      ICE_TX_MAX_BURST);
2999 
3000 		ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3001 				   &tx_pkts[nb_tx], num);
3002 		nb_tx = (uint16_t)(nb_tx + ret);
3003 		nb_pkts = (uint16_t)(nb_pkts - ret);
3004 		if (ret < num)
3005 			break;
3006 	}
3007 
3008 	return nb_tx;
3009 }
3010 
3011 void __rte_cold
3012 ice_set_rx_function(struct rte_eth_dev *dev)
3013 {
3014 	PMD_INIT_FUNC_TRACE();
3015 	struct ice_adapter *ad =
3016 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3017 #ifdef RTE_ARCH_X86
3018 	struct ice_rx_queue *rxq;
3019 	int i;
3020 	bool use_avx512 = false;
3021 	bool use_avx2 = false;
3022 
3023 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3024 		if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
3025 				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3026 			ad->rx_vec_allowed = true;
3027 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
3028 				rxq = dev->data->rx_queues[i];
3029 				if (rxq && ice_rxq_vec_setup(rxq)) {
3030 					ad->rx_vec_allowed = false;
3031 					break;
3032 				}
3033 			}
3034 
3035 			if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3036 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3037 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3038 #ifdef CC_AVX512_SUPPORT
3039 				use_avx512 = true;
3040 #else
3041 			PMD_DRV_LOG(NOTICE,
3042 				"AVX512 is not supported in build env");
3043 #endif
3044 			if (!use_avx512 &&
3045 			(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3046 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3047 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3048 				use_avx2 = true;
3049 
3050 		} else {
3051 			ad->rx_vec_allowed = false;
3052 		}
3053 	}
3054 
3055 	if (ad->rx_vec_allowed) {
3056 		if (dev->data->scattered_rx) {
3057 			if (use_avx512) {
3058 #ifdef CC_AVX512_SUPPORT
3059 				PMD_DRV_LOG(NOTICE,
3060 					"Using AVX512 Vector Scattered Rx (port %d).",
3061 					dev->data->port_id);
3062 				dev->rx_pkt_burst =
3063 					ice_recv_scattered_pkts_vec_avx512;
3064 #endif
3065 			} else {
3066 				PMD_DRV_LOG(DEBUG,
3067 					"Using %sVector Scattered Rx (port %d).",
3068 					use_avx2 ? "avx2 " : "",
3069 					dev->data->port_id);
3070 				dev->rx_pkt_burst = use_avx2 ?
3071 					ice_recv_scattered_pkts_vec_avx2 :
3072 					ice_recv_scattered_pkts_vec;
3073 			}
3074 		} else {
3075 			if (use_avx512) {
3076 #ifdef CC_AVX512_SUPPORT
3077 				PMD_DRV_LOG(NOTICE,
3078 					"Using AVX512 Vector Rx (port %d).",
3079 					dev->data->port_id);
3080 				dev->rx_pkt_burst =
3081 					ice_recv_pkts_vec_avx512;
3082 #endif
3083 			} else {
3084 				PMD_DRV_LOG(DEBUG,
3085 					"Using %sVector Rx (port %d).",
3086 					use_avx2 ? "avx2 " : "",
3087 					dev->data->port_id);
3088 				dev->rx_pkt_burst = use_avx2 ?
3089 					ice_recv_pkts_vec_avx2 :
3090 					ice_recv_pkts_vec;
3091 			}
3092 		}
3093 		return;
3094 	}
3095 
3096 #endif
3097 
3098 	if (dev->data->scattered_rx) {
3099 		/* Set the non-LRO scattered function */
3100 		PMD_INIT_LOG(DEBUG,
3101 			     "Using a Scattered function on port %d.",
3102 			     dev->data->port_id);
3103 		dev->rx_pkt_burst = ice_recv_scattered_pkts;
3104 	} else if (ad->rx_bulk_alloc_allowed) {
3105 		PMD_INIT_LOG(DEBUG,
3106 			     "Rx Burst Bulk Alloc Preconditions are "
3107 			     "satisfied. Rx Burst Bulk Alloc function "
3108 			     "will be used on port %d.",
3109 			     dev->data->port_id);
3110 		dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3111 	} else {
3112 		PMD_INIT_LOG(DEBUG,
3113 			     "Rx Burst Bulk Alloc Preconditions are not "
3114 			     "satisfied, Normal Rx will be used on port %d.",
3115 			     dev->data->port_id);
3116 		dev->rx_pkt_burst = ice_recv_pkts;
3117 	}
3118 }
3119 
3120 static const struct {
3121 	eth_rx_burst_t pkt_burst;
3122 	const char *info;
3123 } ice_rx_burst_infos[] = {
3124 	{ ice_recv_scattered_pkts,          "Scalar Scattered" },
3125 	{ ice_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc" },
3126 	{ ice_recv_pkts,                    "Scalar" },
3127 #ifdef RTE_ARCH_X86
3128 #ifdef CC_AVX512_SUPPORT
3129 	{ ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3130 	{ ice_recv_pkts_vec_avx512,           "Vector AVX512" },
3131 #endif
3132 	{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3133 	{ ice_recv_pkts_vec_avx2,           "Vector AVX2" },
3134 	{ ice_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
3135 	{ ice_recv_pkts_vec,                "Vector SSE" },
3136 #endif
3137 };
3138 
3139 int
3140 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3141 		      struct rte_eth_burst_mode *mode)
3142 {
3143 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3144 	int ret = -EINVAL;
3145 	unsigned int i;
3146 
3147 	for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3148 		if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3149 			snprintf(mode->info, sizeof(mode->info), "%s",
3150 				 ice_rx_burst_infos[i].info);
3151 			ret = 0;
3152 			break;
3153 		}
3154 	}
3155 
3156 	return ret;
3157 }
3158 
3159 void __rte_cold
3160 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3161 {
3162 	struct ice_adapter *ad =
3163 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3164 
3165 	/* Use a simple Tx queue if possible (only fast free is allowed) */
3166 	ad->tx_simple_allowed =
3167 		(txq->offloads ==
3168 		(txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3169 		txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3170 
3171 	if (ad->tx_simple_allowed)
3172 		PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3173 			     txq->queue_id);
3174 	else
3175 		PMD_INIT_LOG(DEBUG,
3176 			     "Simple Tx can NOT be enabled on Tx queue %u.",
3177 			     txq->queue_id);
3178 }
3179 
3180 /*********************************************************************
3181  *
3182  *  TX prep functions
3183  *
3184  **********************************************************************/
3185 /* The default values of TSO MSS */
3186 #define ICE_MIN_TSO_MSS            64
3187 #define ICE_MAX_TSO_MSS            9728
3188 #define ICE_MAX_TSO_FRAME_SIZE     262144
3189 uint16_t
3190 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3191 	      uint16_t nb_pkts)
3192 {
3193 	int i, ret;
3194 	uint64_t ol_flags;
3195 	struct rte_mbuf *m;
3196 
3197 	for (i = 0; i < nb_pkts; i++) {
3198 		m = tx_pkts[i];
3199 		ol_flags = m->ol_flags;
3200 
3201 		if (ol_flags & PKT_TX_TCP_SEG &&
3202 		    (m->tso_segsz < ICE_MIN_TSO_MSS ||
3203 		     m->tso_segsz > ICE_MAX_TSO_MSS ||
3204 		     m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3205 			/**
3206 			 * MSS values outside this range are considered malicious
3207 			 */
3208 			rte_errno = EINVAL;
3209 			return i;
3210 		}
3211 
3212 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3213 		ret = rte_validate_tx_offload(m);
3214 		if (ret != 0) {
3215 			rte_errno = -ret;
3216 			return i;
3217 		}
3218 #endif
3219 		ret = rte_net_intel_cksum_prepare(m);
3220 		if (ret != 0) {
3221 			rte_errno = -ret;
3222 			return i;
3223 		}
3224 	}
3225 	return i;
3226 }
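/*
 * Illustrative example: a TSO mbuf with tso_segsz = 1460 passes the bounds
 * check, while tso_segsz = 32 (below ICE_MIN_TSO_MSS) or a 300 KB frame
 * (above ICE_MAX_TSO_FRAME_SIZE) makes the function stop at that packet and
 * set rte_errno = EINVAL.
 */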
3227 
3228 void __rte_cold
3229 ice_set_tx_function(struct rte_eth_dev *dev)
3230 {
3231 	struct ice_adapter *ad =
3232 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3233 #ifdef RTE_ARCH_X86
3234 	struct ice_tx_queue *txq;
3235 	int i;
3236 	bool use_avx512 = false;
3237 	bool use_avx2 = false;
3238 
3239 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3240 		if (!ice_tx_vec_dev_check(dev) &&
3241 				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3242 			ad->tx_vec_allowed = true;
3243 			for (i = 0; i < dev->data->nb_tx_queues; i++) {
3244 				txq = dev->data->tx_queues[i];
3245 				if (txq && ice_txq_vec_setup(txq)) {
3246 					ad->tx_vec_allowed = false;
3247 					break;
3248 				}
3249 			}
3250 
3251 			if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3252 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3253 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3254 #ifdef CC_AVX512_SUPPORT
3255 				use_avx512 = true;
3256 #else
3257 			PMD_DRV_LOG(NOTICE,
3258 				"AVX512 is not supported in build env");
3259 #endif
3260 			if (!use_avx512 &&
3261 			(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3262 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3263 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3264 				use_avx2 = true;
3265 
3266 		} else {
3267 			ad->tx_vec_allowed = false;
3268 		}
3269 	}
3270 
3271 	if (ad->tx_vec_allowed) {
3272 		if (use_avx512) {
3273 #ifdef CC_AVX512_SUPPORT
3274 			PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
3275 				    dev->data->port_id);
3276 			dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3277 #endif
3278 		} else {
3279 			PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3280 				    use_avx2 ? "avx2 " : "",
3281 				    dev->data->port_id);
3282 			dev->tx_pkt_burst = use_avx2 ?
3283 					    ice_xmit_pkts_vec_avx2 :
3284 					    ice_xmit_pkts_vec;
3285 		}
3286 		dev->tx_pkt_prepare = NULL;
3287 
3288 		return;
3289 	}
3290 #endif
3291 
3292 	if (ad->tx_simple_allowed) {
3293 		PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
3294 		dev->tx_pkt_burst = ice_xmit_pkts_simple;
3295 		dev->tx_pkt_prepare = NULL;
3296 	} else {
3297 		PMD_INIT_LOG(DEBUG, "Normal tx finally be used.");
3298 		dev->tx_pkt_burst = ice_xmit_pkts;
3299 		dev->tx_pkt_prepare = ice_prep_pkts;
3300 	}
3301 }
3302 
3303 static const struct {
3304 	eth_tx_burst_t pkt_burst;
3305 	const char *info;
3306 } ice_tx_burst_infos[] = {
3307 	{ ice_xmit_pkts_simple,   "Scalar Simple" },
3308 	{ ice_xmit_pkts,          "Scalar" },
3309 #ifdef RTE_ARCH_X86
3310 #ifdef CC_AVX512_SUPPORT
3311 	{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
3312 #endif
3313 	{ ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3314 	{ ice_xmit_pkts_vec,      "Vector SSE" },
3315 #endif
3316 };
3317 
3318 int
3319 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3320 		      struct rte_eth_burst_mode *mode)
3321 {
3322 	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3323 	int ret = -EINVAL;
3324 	unsigned int i;
3325 
3326 	for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3327 		if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3328 			snprintf(mode->info, sizeof(mode->info), "%s",
3329 				 ice_tx_burst_infos[i].info);
3330 			ret = 0;
3331 			break;
3332 		}
3333 	}
3334 
3335 	return ret;
3336 }
3337 
3338 /* For the meaning of each value, the hardware datasheet provides more details.
3339  *
3340  * @note: fix ice_dev_supported_ptypes_get() if anything changes here.
3341  */
3342 static inline uint32_t
3343 ice_get_default_pkt_type(uint16_t ptype)
3344 {
3345 	static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3346 		__rte_cache_aligned = {
3347 		/* L2 types */
3348 		/* [0] reserved */
3349 		[1] = RTE_PTYPE_L2_ETHER,
3350 		[2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3351 		/* [3] - [5] reserved */
3352 		[6] = RTE_PTYPE_L2_ETHER_LLDP,
3353 		/* [7] - [10] reserved */
3354 		[11] = RTE_PTYPE_L2_ETHER_ARP,
3355 		/* [12] - [21] reserved */
3356 
3357 		/* Non tunneled IPv4 */
3358 		[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3359 		       RTE_PTYPE_L4_FRAG,
3360 		[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3361 		       RTE_PTYPE_L4_NONFRAG,
3362 		[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3363 		       RTE_PTYPE_L4_UDP,
3364 		/* [25] reserved */
3365 		[26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3366 		       RTE_PTYPE_L4_TCP,
3367 		[27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3368 		       RTE_PTYPE_L4_SCTP,
3369 		[28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3370 		       RTE_PTYPE_L4_ICMP,
3371 
3372 		/* IPv4 --> IPv4 */
3373 		[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3374 		       RTE_PTYPE_TUNNEL_IP |
3375 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3376 		       RTE_PTYPE_INNER_L4_FRAG,
3377 		[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3378 		       RTE_PTYPE_TUNNEL_IP |
3379 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3380 		       RTE_PTYPE_INNER_L4_NONFRAG,
3381 		[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3382 		       RTE_PTYPE_TUNNEL_IP |
3383 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3384 		       RTE_PTYPE_INNER_L4_UDP,
3385 		/* [32] reserved */
3386 		[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3387 		       RTE_PTYPE_TUNNEL_IP |
3388 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3389 		       RTE_PTYPE_INNER_L4_TCP,
3390 		[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3391 		       RTE_PTYPE_TUNNEL_IP |
3392 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3393 		       RTE_PTYPE_INNER_L4_SCTP,
3394 		[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3395 		       RTE_PTYPE_TUNNEL_IP |
3396 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3397 		       RTE_PTYPE_INNER_L4_ICMP,
3398 
3399 		/* IPv4 --> IPv6 */
3400 		[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3401 		       RTE_PTYPE_TUNNEL_IP |
3402 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3403 		       RTE_PTYPE_INNER_L4_FRAG,
3404 		[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3405 		       RTE_PTYPE_TUNNEL_IP |
3406 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3407 		       RTE_PTYPE_INNER_L4_NONFRAG,
3408 		[38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3409 		       RTE_PTYPE_TUNNEL_IP |
3410 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3411 		       RTE_PTYPE_INNER_L4_UDP,
3412 		/* [39] reserved */
3413 		[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3414 		       RTE_PTYPE_TUNNEL_IP |
3415 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3416 		       RTE_PTYPE_INNER_L4_TCP,
3417 		[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3418 		       RTE_PTYPE_TUNNEL_IP |
3419 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3420 		       RTE_PTYPE_INNER_L4_SCTP,
3421 		[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3422 		       RTE_PTYPE_TUNNEL_IP |
3423 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3424 		       RTE_PTYPE_INNER_L4_ICMP,
3425 
3426 		/* IPv4 --> GRE/Teredo/VXLAN */
3427 		[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3428 		       RTE_PTYPE_TUNNEL_GRENAT,
3429 
3430 		/* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3431 		[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3432 		       RTE_PTYPE_TUNNEL_GRENAT |
3433 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3434 		       RTE_PTYPE_INNER_L4_FRAG,
3435 		[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3436 		       RTE_PTYPE_TUNNEL_GRENAT |
3437 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3438 		       RTE_PTYPE_INNER_L4_NONFRAG,
3439 		[46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3440 		       RTE_PTYPE_TUNNEL_GRENAT |
3441 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3442 		       RTE_PTYPE_INNER_L4_UDP,
3443 		/* [47] reserved */
3444 		[48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3445 		       RTE_PTYPE_TUNNEL_GRENAT |
3446 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3447 		       RTE_PTYPE_INNER_L4_TCP,
3448 		[49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3449 		       RTE_PTYPE_TUNNEL_GRENAT |
3450 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3451 		       RTE_PTYPE_INNER_L4_SCTP,
3452 		[50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3453 		       RTE_PTYPE_TUNNEL_GRENAT |
3454 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3455 		       RTE_PTYPE_INNER_L4_ICMP,
3456 
3457 		/* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3458 		[51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3459 		       RTE_PTYPE_TUNNEL_GRENAT |
3460 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3461 		       RTE_PTYPE_INNER_L4_FRAG,
3462 		[52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3463 		       RTE_PTYPE_TUNNEL_GRENAT |
3464 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3465 		       RTE_PTYPE_INNER_L4_NONFRAG,
3466 		[53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3467 		       RTE_PTYPE_TUNNEL_GRENAT |
3468 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3469 		       RTE_PTYPE_INNER_L4_UDP,
3470 		/* [54] reserved */
3471 		[55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3472 		       RTE_PTYPE_TUNNEL_GRENAT |
3473 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3474 		       RTE_PTYPE_INNER_L4_TCP,
3475 		[56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3476 		       RTE_PTYPE_TUNNEL_GRENAT |
3477 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3478 		       RTE_PTYPE_INNER_L4_SCTP,
3479 		[57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3480 		       RTE_PTYPE_TUNNEL_GRENAT |
3481 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3482 		       RTE_PTYPE_INNER_L4_ICMP,
3483 
3484 		/* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3485 		[58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3486 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3487 
3488 		/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3489 		[59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3490 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3491 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3492 		       RTE_PTYPE_INNER_L4_FRAG,
3493 		[60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3494 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3495 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3496 		       RTE_PTYPE_INNER_L4_NONFRAG,
3497 		[61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3498 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3499 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3500 		       RTE_PTYPE_INNER_L4_UDP,
3501 		/* [62] reserved */
3502 		[63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3503 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3504 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3505 		       RTE_PTYPE_INNER_L4_TCP,
3506 		[64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3507 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3508 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3509 		       RTE_PTYPE_INNER_L4_SCTP,
3510 		[65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3511 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3512 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3513 		       RTE_PTYPE_INNER_L4_ICMP,
3514 
3515 		/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3516 		[66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3517 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3518 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3519 		       RTE_PTYPE_INNER_L4_FRAG,
3520 		[67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3521 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3522 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3523 		       RTE_PTYPE_INNER_L4_NONFRAG,
3524 		[68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3525 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3526 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3527 		       RTE_PTYPE_INNER_L4_UDP,
3528 		/* [69] reserved */
3529 		[70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3530 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3531 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3532 		       RTE_PTYPE_INNER_L4_TCP,
3533 		[71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3534 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3535 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3536 		       RTE_PTYPE_INNER_L4_SCTP,
3537 		[72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3538 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3539 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3540 		       RTE_PTYPE_INNER_L4_ICMP,
3541 		/* [73] - [87] reserved */
3542 
3543 		/* Non tunneled IPv6 */
3544 		[88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3545 		       RTE_PTYPE_L4_FRAG,
3546 		[89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3547 		       RTE_PTYPE_L4_NONFRAG,
3548 		[90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3549 		       RTE_PTYPE_L4_UDP,
3550 		/* [91] reserved */
3551 		[92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3552 		       RTE_PTYPE_L4_TCP,
3553 		[93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3554 		       RTE_PTYPE_L4_SCTP,
3555 		[94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3556 		       RTE_PTYPE_L4_ICMP,
3557 
3558 		/* IPv6 --> IPv4 */
3559 		[95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3560 		       RTE_PTYPE_TUNNEL_IP |
3561 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3562 		       RTE_PTYPE_INNER_L4_FRAG,
3563 		[96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3564 		       RTE_PTYPE_TUNNEL_IP |
3565 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3566 		       RTE_PTYPE_INNER_L4_NONFRAG,
3567 		[97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3568 		       RTE_PTYPE_TUNNEL_IP |
3569 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3570 		       RTE_PTYPE_INNER_L4_UDP,
3571 		/* [98] reserved */
3572 		[99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3573 		       RTE_PTYPE_TUNNEL_IP |
3574 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3575 		       RTE_PTYPE_INNER_L4_TCP,
3576 		[100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3577 			RTE_PTYPE_TUNNEL_IP |
3578 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3579 			RTE_PTYPE_INNER_L4_SCTP,
3580 		[101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3581 			RTE_PTYPE_TUNNEL_IP |
3582 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3583 			RTE_PTYPE_INNER_L4_ICMP,
3584 
3585 		/* IPv6 --> IPv6 */
3586 		[102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3587 			RTE_PTYPE_TUNNEL_IP |
3588 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3589 			RTE_PTYPE_INNER_L4_FRAG,
3590 		[103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3591 			RTE_PTYPE_TUNNEL_IP |
3592 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3593 			RTE_PTYPE_INNER_L4_NONFRAG,
3594 		[104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3595 			RTE_PTYPE_TUNNEL_IP |
3596 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3597 			RTE_PTYPE_INNER_L4_UDP,
3598 		/* [105] reserved */
3599 		[106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3600 			RTE_PTYPE_TUNNEL_IP |
3601 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3602 			RTE_PTYPE_INNER_L4_TCP,
3603 		[107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3604 			RTE_PTYPE_TUNNEL_IP |
3605 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3606 			RTE_PTYPE_INNER_L4_SCTP,
3607 		[108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3608 			RTE_PTYPE_TUNNEL_IP |
3609 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3610 			RTE_PTYPE_INNER_L4_ICMP,
3611 
3612 		/* IPv6 --> GRE/Teredo/VXLAN */
3613 		[109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3614 			RTE_PTYPE_TUNNEL_GRENAT,
3615 
3616 		/* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3617 		[110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3618 			RTE_PTYPE_TUNNEL_GRENAT |
3619 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3620 			RTE_PTYPE_INNER_L4_FRAG,
3621 		[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3622 			RTE_PTYPE_TUNNEL_GRENAT |
3623 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3624 			RTE_PTYPE_INNER_L4_NONFRAG,
3625 		[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3626 			RTE_PTYPE_TUNNEL_GRENAT |
3627 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3628 			RTE_PTYPE_INNER_L4_UDP,
3629 		/* [113] reserved */
3630 		[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3631 			RTE_PTYPE_TUNNEL_GRENAT |
3632 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3633 			RTE_PTYPE_INNER_L4_TCP,
3634 		[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3635 			RTE_PTYPE_TUNNEL_GRENAT |
3636 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3637 			RTE_PTYPE_INNER_L4_SCTP,
3638 		[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3639 			RTE_PTYPE_TUNNEL_GRENAT |
3640 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3641 			RTE_PTYPE_INNER_L4_ICMP,
3642 
3643 		/* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3644 		[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3645 			RTE_PTYPE_TUNNEL_GRENAT |
3646 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3647 			RTE_PTYPE_INNER_L4_FRAG,
3648 		[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3649 			RTE_PTYPE_TUNNEL_GRENAT |
3650 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3651 			RTE_PTYPE_INNER_L4_NONFRAG,
3652 		[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3653 			RTE_PTYPE_TUNNEL_GRENAT |
3654 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3655 			RTE_PTYPE_INNER_L4_UDP,
3656 		/* [120] reserved */
3657 		[121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3658 			RTE_PTYPE_TUNNEL_GRENAT |
3659 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3660 			RTE_PTYPE_INNER_L4_TCP,
3661 		[122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3662 			RTE_PTYPE_TUNNEL_GRENAT |
3663 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3664 			RTE_PTYPE_INNER_L4_SCTP,
3665 		[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3666 			RTE_PTYPE_TUNNEL_GRENAT |
3667 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3668 			RTE_PTYPE_INNER_L4_ICMP,
3669 
3670 		/* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3671 		[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3672 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3673 
3674 		/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3675 		[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3676 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3677 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3678 			RTE_PTYPE_INNER_L4_FRAG,
3679 		[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3680 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3681 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3682 			RTE_PTYPE_INNER_L4_NONFRAG,
3683 		[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3684 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3685 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3686 			RTE_PTYPE_INNER_L4_UDP,
3687 		/* [128] reserved */
3688 		[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3689 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3690 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3691 			RTE_PTYPE_INNER_L4_TCP,
3692 		[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3693 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3694 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3695 			RTE_PTYPE_INNER_L4_SCTP,
3696 		[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3697 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3698 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3699 			RTE_PTYPE_INNER_L4_ICMP,
3700 
3701 		/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3702 		[132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3703 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3704 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3705 			RTE_PTYPE_INNER_L4_FRAG,
3706 		[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3707 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3708 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3709 			RTE_PTYPE_INNER_L4_NONFRAG,
3710 		[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3711 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3712 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3713 			RTE_PTYPE_INNER_L4_UDP,
3714 		/* [135] reserved */
3715 		[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3716 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3717 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3718 			RTE_PTYPE_INNER_L4_TCP,
3719 		[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3720 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3721 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3722 			RTE_PTYPE_INNER_L4_SCTP,
3723 		[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3724 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3725 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3726 			RTE_PTYPE_INNER_L4_ICMP,
3727 		/* [139] - [299] reserved */
3728 
3729 		/* PPPoE */
3730 		[300] = RTE_PTYPE_L2_ETHER_PPPOE,
3731 		[301] = RTE_PTYPE_L2_ETHER_PPPOE,
3732 
3733 		/* PPPoE --> IPv4 */
3734 		[302] = RTE_PTYPE_L2_ETHER_PPPOE |
3735 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3736 			RTE_PTYPE_L4_FRAG,
3737 		[303] = RTE_PTYPE_L2_ETHER_PPPOE |
3738 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3739 			RTE_PTYPE_L4_NONFRAG,
3740 		[304] = RTE_PTYPE_L2_ETHER_PPPOE |
3741 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3742 			RTE_PTYPE_L4_UDP,
3743 		[305] = RTE_PTYPE_L2_ETHER_PPPOE |
3744 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3745 			RTE_PTYPE_L4_TCP,
3746 		[306] = RTE_PTYPE_L2_ETHER_PPPOE |
3747 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3748 			RTE_PTYPE_L4_SCTP,
3749 		[307] = RTE_PTYPE_L2_ETHER_PPPOE |
3750 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3751 			RTE_PTYPE_L4_ICMP,
3752 
3753 		/* PPPoE --> IPv6 */
3754 		[308] = RTE_PTYPE_L2_ETHER_PPPOE |
3755 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3756 			RTE_PTYPE_L4_FRAG,
3757 		[309] = RTE_PTYPE_L2_ETHER_PPPOE |
3758 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3759 			RTE_PTYPE_L4_NONFRAG,
3760 		[310] = RTE_PTYPE_L2_ETHER_PPPOE |
3761 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3762 			RTE_PTYPE_L4_UDP,
3763 		[311] = RTE_PTYPE_L2_ETHER_PPPOE |
3764 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3765 			RTE_PTYPE_L4_TCP,
3766 		[312] = RTE_PTYPE_L2_ETHER_PPPOE |
3767 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3768 			RTE_PTYPE_L4_SCTP,
3769 		[313] = RTE_PTYPE_L2_ETHER_PPPOE |
3770 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3771 			RTE_PTYPE_L4_ICMP,
3772 		/* [314] - [324] reserved */
3773 
3774 		/* IPv4/IPv6 --> GTPC/GTPU */
3775 		[325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3776 			RTE_PTYPE_TUNNEL_GTPC,
3777 		[326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3778 			RTE_PTYPE_TUNNEL_GTPC,
3779 		[327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3780 			RTE_PTYPE_TUNNEL_GTPC,
3781 		[328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3782 			RTE_PTYPE_TUNNEL_GTPC,
3783 		[329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3784 			RTE_PTYPE_TUNNEL_GTPU,
3785 		[330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3786 			RTE_PTYPE_TUNNEL_GTPU,
3787 
3788 		/* IPv4 --> GTPU --> IPv4 */
3789 		[331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3790 			RTE_PTYPE_TUNNEL_GTPU |
3791 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3792 			RTE_PTYPE_INNER_L4_FRAG,
3793 		[332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3794 			RTE_PTYPE_TUNNEL_GTPU |
3795 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3796 			RTE_PTYPE_INNER_L4_NONFRAG,
3797 		[333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3798 			RTE_PTYPE_TUNNEL_GTPU |
3799 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3800 			RTE_PTYPE_INNER_L4_UDP,
3801 		[334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3802 			RTE_PTYPE_TUNNEL_GTPU |
3803 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3804 			RTE_PTYPE_INNER_L4_TCP,
3805 		[335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3806 			RTE_PTYPE_TUNNEL_GTPU |
3807 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3808 			RTE_PTYPE_INNER_L4_ICMP,
3809 
3810 		/* IPv6 --> GTPU --> IPv4 */
3811 		[336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3812 			RTE_PTYPE_TUNNEL_GTPU |
3813 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3814 			RTE_PTYPE_INNER_L4_FRAG,
3815 		[337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3816 			RTE_PTYPE_TUNNEL_GTPU |
3817 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3818 			RTE_PTYPE_INNER_L4_NONFRAG,
3819 		[338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3820 			RTE_PTYPE_TUNNEL_GTPU |
3821 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3822 			RTE_PTYPE_INNER_L4_UDP,
3823 		[339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3824 			RTE_PTYPE_TUNNEL_GTPU |
3825 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3826 			RTE_PTYPE_INNER_L4_TCP,
3827 		[340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3828 			RTE_PTYPE_TUNNEL_GTPU |
3829 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3830 			RTE_PTYPE_INNER_L4_ICMP,
3831 
3832 		/* IPv4 --> GTPU --> IPv6 */
3833 		[341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3834 			RTE_PTYPE_TUNNEL_GTPU |
3835 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3836 			RTE_PTYPE_INNER_L4_FRAG,
3837 		[342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3838 			RTE_PTYPE_TUNNEL_GTPU |
3839 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3840 			RTE_PTYPE_INNER_L4_NONFRAG,
3841 		[343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3842 			RTE_PTYPE_TUNNEL_GTPU |
3843 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3844 			RTE_PTYPE_INNER_L4_UDP,
3845 		[344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3846 			RTE_PTYPE_TUNNEL_GTPU |
3847 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3848 			RTE_PTYPE_INNER_L4_TCP,
3849 		[345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3850 			RTE_PTYPE_TUNNEL_GTPU |
3851 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3852 			RTE_PTYPE_INNER_L4_ICMP,
3853 
3854 		/* IPv6 --> GTPU --> IPv6 */
3855 		[346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3856 			RTE_PTYPE_TUNNEL_GTPU |
3857 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3858 			RTE_PTYPE_INNER_L4_FRAG,
3859 		[347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3860 			RTE_PTYPE_TUNNEL_GTPU |
3861 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3862 			RTE_PTYPE_INNER_L4_NONFRAG,
3863 		[348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3864 			RTE_PTYPE_TUNNEL_GTPU |
3865 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3866 			RTE_PTYPE_INNER_L4_UDP,
3867 		[349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3868 			RTE_PTYPE_TUNNEL_GTPU |
3869 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3870 			RTE_PTYPE_INNER_L4_TCP,
3871 		[350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3872 			RTE_PTYPE_TUNNEL_GTPU |
3873 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3874 			RTE_PTYPE_INNER_L4_ICMP,
3875 		/* All others reserved */
3876 	};
3877 
3878 	return type_table[ptype];
3879 }
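
/*
 * Example (illustrative): hardware ptype 24 is reported to applications as
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP.
 * A helper such as rte_get_ptype_name() from rte_mbuf_ptype.h can be used
 * to turn the returned value into a human-readable string for debugging.
 */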
3880 
3881 void __rte_cold
3882 ice_set_default_ptype_table(struct rte_eth_dev *dev)
3883 {
3884 	struct ice_adapter *ad =
3885 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3886 	int i;
3887 
3888 	for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
3889 		ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
3890 }
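
/*
 * Sketch of assumed consumption (mirroring the Rx burst paths earlier in
 * this file): the descriptor parsing code masks the 10-bit hardware ptype
 * out of the flex Rx descriptor and translates it through this table, e.g.
 *
 *	uint16_t hw_ptype = rte_le_to_cpu_16(rxdp->wb.ptype_flex_flags0) &
 *			    ICE_RX_FLEX_DESC_PTYPE_M;
 *	mb->packet_type = ad->ptype_tbl[hw_ptype];
 *
 * Field and macro names above are assumed to match the flex descriptor
 * definitions used elsewhere in this driver.
 */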
3891 
3892 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S	1
3893 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M	\
3894 			(0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
3895 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
3896 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
3897 
3898 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S	4
3899 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M	\
3900 	(1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
3901 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S	5
3902 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M	\
3903 	(1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
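
/*
 * These masks decode QW1 of the Rx programming status write-back: bits 2:1
 * carry the programming id (add vs. delete), bit 4 flags a programming
 * failure and bit 5 a profile-creation failure.
 */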
3904 
3905 /*
3906  * Check the programming status descriptor in the Rx queue.
3907  * Called after a Flow Director programming descriptor has been
3908  * submitted on the Tx queue.
3909  */
3910 static inline int
3911 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
3912 {
3913 	volatile union ice_32byte_rx_desc *rxdp;
3914 	uint64_t qword1;
3915 	uint32_t rx_status;
3916 	uint32_t error;
3917 	uint32_t id;
3918 	int ret = -EAGAIN;
3919 
3920 	rxdp = (volatile union ice_32byte_rx_desc *)
3921 		(&rxq->rx_ring[rxq->rx_tail]);
3922 	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
3923 	rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
3924 			>> ICE_RXD_QW1_STATUS_S;
3925 
3926 	if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
3927 		ret = 0;
3928 		error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
3929 			ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
3930 		id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
3931 			ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
3932 		if (error) {
3933 			if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
3934 				PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
3935 			else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
3936 				PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
3937 			ret = -EINVAL;
3938 			goto err;
3939 		}
3940 		error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
3941 			ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
3942 		if (error) {
3943 			PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
3944 			ret = -EINVAL;
3945 		}
3946 err:
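		/*
		 * Consume the status descriptor: clear it, advance the
		 * software tail (wrapping at ring size) and hand the slot
		 * back to hardware by writing the tail register one entry
		 * behind the new position.
		 */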
3947 		rxdp->wb.qword1.status_error_len = 0;
3948 		rxq->rx_tail++;
3949 		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
3950 			rxq->rx_tail = 0;
3951 		if (rxq->rx_tail == 0)
3952 			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
3953 		else
3954 			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
3955 	}
3956 
3957 	return ret;
3958 }
3959 
3960 #define ICE_FDIR_MAX_WAIT_US 10000
3961 
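/*
 * Programming uses two Tx ring entries per rule: a filter programming
 * descriptor at tx_tail, immediately followed by a dummy data descriptor
 * that points at the pre-built FDIR packet buffer, after which the tail is
 * advanced by two and completion is polled on the FDIR Rx queue.
 */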
3962 int
3963 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
3964 {
3965 	struct ice_tx_queue *txq = pf->fdir.txq;
3966 	struct ice_rx_queue *rxq = pf->fdir.rxq;
3967 	volatile struct ice_fltr_desc *fdirdp;
3968 	volatile struct ice_tx_desc *txdp;
3969 	uint32_t td_cmd;
3970 	uint16_t i;
3971 
3972 	fdirdp = (volatile struct ice_fltr_desc *)
3973 		(&txq->tx_ring[txq->tx_tail]);
3974 	fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
3975 	fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
3976 
3977 	txdp = &txq->tx_ring[txq->tx_tail + 1];
3978 	txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
3979 	td_cmd = ICE_TX_DESC_CMD_EOP |
3980 		ICE_TX_DESC_CMD_RS  |
3981 		ICE_TX_DESC_CMD_DUMMY;
3982 
3983 	txdp->cmd_type_offset_bsz =
3984 		ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
3985 
3986 	txq->tx_tail += 2;
3987 	if (txq->tx_tail >= txq->nb_tx_desc)
3988 		txq->tx_tail = 0;
3989 	/* Update the tx tail register */
3990 	ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
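	/*
	 * Poll the dummy data descriptor for the descriptor-done dtype,
	 * busy-waiting up to ICE_FDIR_MAX_WAIT_US microseconds.
	 */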
3991 	for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
3992 		if ((txdp->cmd_type_offset_bsz &
3993 		     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
3994 		    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
3995 			break;
3996 		rte_delay_us(1);
3997 	}
3998 	if (i >= ICE_FDIR_MAX_WAIT_US) {
3999 		PMD_DRV_LOG(ERR,
4000 			    "Failed to program FDIR filter: time out to get DD on tx queue.");
4001 		return -ETIMEDOUT;
4002 	}
4003 
4004 	for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4005 		int ret;
4006 
4007 		ret = ice_check_fdir_programming_status(rxq);
4008 		if (ret == -EAGAIN)
4009 			rte_delay_us(1);
4010 		else
4011 			return ret;
4012 	}
4013 
4014 	PMD_DRV_LOG(ERR,
4015 		    "Failed to program FDIR filter: timed out waiting for programming status on Rx queue.");
4016 	return -ETIMEDOUT;
4017 
4018 
4019 }
4020