1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
3 */
4
5 #include <ethdev_driver.h>
6 #include <rte_net.h>
7 #include <rte_vect.h>
8
9 #include "rte_pmd_ice.h"
10 #include "ice_rxtx.h"
11 #include "ice_rxtx_vec_common.h"
12
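/* mbuf Tx offload flags that require checksum/TSO handling in the Tx path */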
13 #define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
14 RTE_MBUF_F_TX_L4_MASK | \
15 RTE_MBUF_F_TX_TCP_SEG | \
16 RTE_MBUF_F_TX_OUTER_IP_CKSUM)
17
18 /* Offset of mbuf dynamic field for protocol extraction data */
19 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
20
21 /* Mask of mbuf dynamic flags for protocol extraction type */
22 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
23 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
27 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
28
29 static int
ice_monitor_callback(const uint64_t value,
31 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
32 {
33 const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
34 /*
35 * we expect the DD bit to be set to 1 if this descriptor was already
36 * written to.
37 */
38 return (value & m) == m ? -1 : 0;
39 }
40
41 int
ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
43 {
44 volatile union ice_rx_flex_desc *rxdp;
45 struct ice_rx_queue *rxq = rx_queue;
46 uint16_t desc;
47
48 desc = rxq->rx_tail;
49 rxdp = &rxq->rx_ring[desc];
50 /* watch for changes in status bit */
51 pmc->addr = &rxdp->wb.status_error0;
52
53 /* comparison callback */
54 pmc->fn = ice_monitor_callback;
55
56 /* register is 16-bit */
57 pmc->size = sizeof(uint16_t);
58
59 return 0;
60 }
61
62
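/* Map a protocol extraction type (proto_xtr devarg) to the flexible Rx
 * descriptor ID (RXDID) programmed into the queue context; unknown types
 * fall back to the default OVS descriptor format.
 */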
63 static inline uint8_t
ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
65 {
66 static uint8_t rxdid_map[] = {
67 [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS,
68 [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
69 [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
70 [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
71 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
72 [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
73 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
74 };
75
76 return xtr_type < RTE_DIM(rxdid_map) ?
77 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
78 }
79
80 static inline void
ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
82 struct rte_mbuf *mb,
83 volatile union ice_rx_flex_desc *rxdp)
84 {
85 volatile struct ice_32b_rx_flex_desc_comms *desc =
86 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
87 uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
88
89 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
90 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
91 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
92 }
93
94 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
95 if (desc->flow_id != 0xFFFFFFFF) {
96 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
97 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
98 }
99 #endif
100 }
101
102 static inline void
ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
104 struct rte_mbuf *mb,
105 volatile union ice_rx_flex_desc *rxdp)
106 {
107 volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
108 (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
109 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
110 uint16_t stat_err;
111 #endif
112
113 if (desc->flow_id != 0xFFFFFFFF) {
114 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
115 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
116 }
117
118 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
119 stat_err = rte_le_to_cpu_16(desc->status_error0);
120 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
121 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
122 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
123 }
124 #endif
125 }
126
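/* Fill mbuf metadata from an AUX (v1) flexible descriptor: validity of the
 * protocol extraction words is signalled by the XTRMD4/XTRMD5 bits in
 * status_error1.
 */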
127 static inline void
ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
129 struct rte_mbuf *mb,
130 volatile union ice_rx_flex_desc *rxdp)
131 {
132 volatile struct ice_32b_rx_flex_desc_comms *desc =
133 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
134 uint16_t stat_err;
135
136 stat_err = rte_le_to_cpu_16(desc->status_error0);
137 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
138 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
139 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
140 }
141
142 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
143 if (desc->flow_id != 0xFFFFFFFF) {
144 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
145 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
146 }
147
148 if (rxq->xtr_ol_flag) {
149 uint32_t metadata = 0;
150
151 stat_err = rte_le_to_cpu_16(desc->status_error1);
152
153 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
154 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
155
156 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
157 metadata |=
158 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
159
160 if (metadata) {
161 mb->ol_flags |= rxq->xtr_ol_flag;
162
163 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
164 }
165 }
166 #else
167 RTE_SET_USED(rxq);
168 #endif
169 }
170
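/* Fill mbuf metadata from an AUX (v2) flexible descriptor: aux0/aux1 carry
 * the extracted protocol data directly, with 0xFFFF used as the "invalid"
 * sentinel.
 */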
171 static inline void
ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
173 struct rte_mbuf *mb,
174 volatile union ice_rx_flex_desc *rxdp)
175 {
176 volatile struct ice_32b_rx_flex_desc_comms *desc =
177 (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
178 uint16_t stat_err;
179
180 stat_err = rte_le_to_cpu_16(desc->status_error0);
181 if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
182 mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
183 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
184 }
185
186 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
187 if (desc->flow_id != 0xFFFFFFFF) {
188 mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
189 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
190 }
191
192 if (rxq->xtr_ol_flag) {
193 uint32_t metadata = 0;
194
195 if (desc->flex_ts.flex.aux0 != 0xFFFF)
196 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
197 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
198 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
199
200 if (metadata) {
201 mb->ol_flags |= rxq->xtr_ol_flag;
202
203 *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
204 }
205 }
206 #else
207 RTE_SET_USED(rxq);
208 #endif
209 }
210
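/* Per-RXDID handlers that translate flexible Rx descriptor fields (RSS hash,
 * FDIR ID, protocol extraction metadata) into mbuf fields; indexed by the
 * RXDID programmed for the queue.
 */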
211 static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
212 [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
213 [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
214 [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
215 [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
216 [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
217 [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
218 [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
219 [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
220 };
221
222 void
ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
224 {
225 rxq->rxdid = rxdid;
226
227 switch (rxdid) {
228 case ICE_RXDID_COMMS_AUX_VLAN:
229 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
230 break;
231
232 case ICE_RXDID_COMMS_AUX_IPV4:
233 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
234 break;
235
236 case ICE_RXDID_COMMS_AUX_IPV6:
237 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
238 break;
239
240 case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
241 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
242 break;
243
244 case ICE_RXDID_COMMS_AUX_TCP:
245 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
246 break;
247
248 case ICE_RXDID_COMMS_AUX_IP_OFFSET:
249 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
250 break;
251
252 case ICE_RXDID_COMMS_GENERIC:
253 /* fallthrough */
254 case ICE_RXDID_COMMS_OVS:
255 break;
256
257 default:
258 /* update this according to the RXDID for PROTO_XTR_NONE */
259 rxq->rxdid = ICE_RXDID_COMMS_OVS;
260 break;
261 }
262
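	/* If the protocol extraction metadata dynfield was never registered,
	 * disable extraction for this queue.
	 */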
263 if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
264 rxq->xtr_ol_flag = 0;
265 }
266
267 static enum ice_status
ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
269 {
270 struct ice_vsi *vsi = rxq->vsi;
271 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
272 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
273 struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
274 struct ice_rlan_ctx rx_ctx;
275 enum ice_status err;
276 uint16_t buf_size;
277 uint32_t rxdid = ICE_RXDID_COMMS_OVS;
278 uint32_t regval;
279 struct ice_adapter *ad = rxq->vsi->adapter;
280 uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
281
282 /* Set buffer size as the head split is disabled. */
283 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
284 RTE_PKTMBUF_HEADROOM);
285 rxq->rx_hdr_len = 0;
286 rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
287 rxq->max_pkt_len =
288 RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
289 frame_size);
290
291 if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN ||
292 rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
293 PMD_DRV_LOG(ERR, "maximum packet length must "
294 "be larger than %u and smaller than %u",
295 (uint32_t)RTE_ETHER_MIN_LEN,
296 (uint32_t)ICE_FRAME_SIZE_MAX);
297 return -EINVAL;
298 }
299
300 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
301 /* Register mbuf field and flag for Rx timestamp */
302 err = rte_mbuf_dyn_rx_timestamp_register(
303 &ice_timestamp_dynfield_offset,
304 &ice_timestamp_dynflag);
305 if (err) {
306 PMD_DRV_LOG(ERR,
307 "Cannot register mbuf field/flag for timestamp");
308 return -EINVAL;
309 }
310 }
311
312 memset(&rx_ctx, 0, sizeof(rx_ctx));
313
314 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
315 rx_ctx.qlen = rxq->nb_rx_desc;
316 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
317 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
318 rx_ctx.dtype = 0; /* No Header Split mode */
319 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
320 rx_ctx.dsize = 1; /* 32B descriptors */
321 #endif
322 rx_ctx.rxmax = rxq->max_pkt_len;
323 /* TPH: Transaction Layer Packet (TLP) processing hints */
324 rx_ctx.tphrdesc_ena = 1;
325 rx_ctx.tphwdesc_ena = 1;
326 rx_ctx.tphdata_ena = 1;
327 rx_ctx.tphhead_ena = 1;
	/* Low Receive Queue Threshold, defined in units of 64 descriptors.
	 * When the number of free descriptors goes below the lrxqthresh,
	 * an immediate interrupt is triggered.
	 */
332 rx_ctx.lrxqthresh = 2;
	/* Default: 32B descriptors; VLAN tag extracted to L2TAG2 (1st) */
334 rx_ctx.l2tsel = 1;
335 rx_ctx.showiv = 0;
336 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
337
338 rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
339
340 PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
341 rxq->port_id, rxq->queue_id, rxdid);
342
343 if (!(pf->supported_rxdid & BIT(rxdid))) {
344 PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
345 rxdid);
346 return -EINVAL;
347 }
348
349 ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
350
351 /* Enable Flexible Descriptors in the queue context which
352 * allows this driver to select a specific receive descriptor format
353 */
354 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
355 QRXFLXP_CNTXT_RXDID_IDX_M;
356
	/* Increase context priority to pick up the profile ID;
	 * default is 0x01; setting to 0x03 ensures the profile
	 * is programmed even if the previous context has the same priority.
	 */
361 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
362 QRXFLXP_CNTXT_RXDID_PRIO_M;
363
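	/* Enable descriptor timestamping when PTP or the Rx timestamp offload
	 * is in use.
	 */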
364 if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
365 regval |= QRXFLXP_CNTXT_TS_M;
366
367 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
368
369 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
370 if (err) {
371 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
372 rxq->queue_id);
373 return -EINVAL;
374 }
375 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
376 if (err) {
377 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
378 rxq->queue_id);
379 return -EINVAL;
380 }
381
382 /* Check if scattered RX needs to be used. */
383 if (frame_size > buf_size)
384 dev_data->scattered_rx = 1;
385
386 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
387
388 /* Init the Rx tail register*/
389 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
390
391 return 0;
392 }
393
394 /* Allocate mbufs for all descriptors in rx queue */
395 static int
ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
397 {
398 struct ice_rx_entry *rxe = rxq->sw_ring;
399 uint64_t dma_addr;
400 uint16_t i;
401
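	/* Post one mbuf per descriptor: reset the mbuf fields and program the
	 * buffer DMA address into the read-format descriptor.
	 */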
402 for (i = 0; i < rxq->nb_rx_desc; i++) {
403 volatile union ice_rx_flex_desc *rxd;
404 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
405
406 if (unlikely(!mbuf)) {
407 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
408 return -ENOMEM;
409 }
410
411 rte_mbuf_refcnt_set(mbuf, 1);
412 mbuf->next = NULL;
413 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
414 mbuf->nb_segs = 1;
415 mbuf->port = rxq->port_id;
416
417 dma_addr =
418 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
419
420 rxd = &rxq->rx_ring[i];
421 rxd->read.pkt_addr = dma_addr;
422 rxd->read.hdr_addr = 0;
423 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
424 rxd->read.rsvd1 = 0;
425 rxd->read.rsvd2 = 0;
426 #endif
427 rxe[i].mbuf = mbuf;
428 }
429
430 return 0;
431 }
432
433 /* Free all mbufs for descriptors in rx queue */
434 static void
_ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
436 {
437 uint16_t i;
438
439 if (!rxq || !rxq->sw_ring) {
440 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
441 return;
442 }
443
444 for (i = 0; i < rxq->nb_rx_desc; i++) {
445 if (rxq->sw_ring[i].mbuf) {
446 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
447 rxq->sw_ring[i].mbuf = NULL;
448 }
449 }
450 if (rxq->rx_nb_avail == 0)
451 return;
452 for (i = 0; i < rxq->rx_nb_avail; i++)
453 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
454
455 rxq->rx_nb_avail = 0;
456 }
457
458 /* turn on or off rx queue
459 * @q_idx: queue index in pf scope
460 * @on: turn on or off the queue
461 */
462 static int
ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
464 {
465 uint32_t reg;
466 uint16_t j;
467
468 /* QRX_CTRL = QRX_ENA */
469 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
470
471 if (on) {
472 if (reg & QRX_CTRL_QENA_STAT_M)
473 return 0; /* Already on, skip */
474 reg |= QRX_CTRL_QENA_REQ_M;
475 } else {
476 if (!(reg & QRX_CTRL_QENA_STAT_M))
477 return 0; /* Already off, skip */
478 reg &= ~QRX_CTRL_QENA_REQ_M;
479 }
480
481 /* Write the register */
482 ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
	/* Check the result. It is said that QENA_STAT
	 * follows the QENA_REQ by not more than 10 us.
	 * TODO: need to change the wait counter later
	 */
487 for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
488 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
489 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
490 if (on) {
491 if ((reg & QRX_CTRL_QENA_REQ_M) &&
492 (reg & QRX_CTRL_QENA_STAT_M))
493 break;
494 } else {
495 if (!(reg & QRX_CTRL_QENA_REQ_M) &&
496 !(reg & QRX_CTRL_QENA_STAT_M))
497 break;
498 }
499 }
500
501 /* Check if it is timeout */
502 if (j >= ICE_CHK_Q_ENA_COUNT) {
503 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
504 (on ? "enable" : "disable"), q_idx);
505 return -ETIMEDOUT;
506 }
507
508 return 0;
509 }
510
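/* Bulk-allocation Rx requires rx_free_thresh to be at least ICE_RX_MAX_BURST,
 * smaller than the ring size and a divisor of it, so buffer refills always
 * happen in fixed-size, ring-aligned chunks.
 */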
511 static inline int
ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
513 {
514 int ret = 0;
515
516 if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
517 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
518 "rxq->rx_free_thresh=%d, "
519 "ICE_RX_MAX_BURST=%d",
520 rxq->rx_free_thresh, ICE_RX_MAX_BURST);
521 ret = -EINVAL;
522 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
523 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
524 "rxq->rx_free_thresh=%d, "
525 "rxq->nb_rx_desc=%d",
526 rxq->rx_free_thresh, rxq->nb_rx_desc);
527 ret = -EINVAL;
528 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
529 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
530 "rxq->nb_rx_desc=%d, "
531 "rxq->rx_free_thresh=%d",
532 rxq->nb_rx_desc, rxq->rx_free_thresh);
533 ret = -EINVAL;
534 }
535
536 return ret;
537 }
538
539 /* reset fields in ice_rx_queue back to default */
540 static void
ice_reset_rx_queue(struct ice_rx_queue *rxq)
542 {
543 unsigned int i;
544 uint16_t len;
545
546 if (!rxq) {
547 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
548 return;
549 }
550
551 len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
552
553 for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
554 ((volatile char *)rxq->rx_ring)[i] = 0;
555
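	/* Point the extra sw_ring entries beyond nb_rx_desc at a dummy mbuf so
	 * the bulk-alloc look-ahead scan never dereferences stale pointers.
	 */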
556 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
557 for (i = 0; i < ICE_RX_MAX_BURST; ++i)
558 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
559
560 rxq->rx_nb_avail = 0;
561 rxq->rx_next_avail = 0;
562 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
563
564 rxq->rx_tail = 0;
565 rxq->nb_rx_hold = 0;
566 rxq->pkt_first_seg = NULL;
567 rxq->pkt_last_seg = NULL;
568
569 rxq->rxrearm_start = 0;
570 rxq->rxrearm_nb = 0;
571 }
572
573 int
ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
575 {
576 struct ice_rx_queue *rxq;
577 int err;
578 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
579
580 PMD_INIT_FUNC_TRACE();
581
582 if (rx_queue_id >= dev->data->nb_rx_queues) {
583 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
584 rx_queue_id, dev->data->nb_rx_queues);
585 return -EINVAL;
586 }
587
588 rxq = dev->data->rx_queues[rx_queue_id];
589 if (!rxq || !rxq->q_set) {
		PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
591 rx_queue_id);
592 return -EINVAL;
593 }
594
595 err = ice_program_hw_rx_queue(rxq);
596 if (err) {
597 PMD_DRV_LOG(ERR, "fail to program RX queue %u",
598 rx_queue_id);
599 return -EIO;
600 }
601
602 err = ice_alloc_rx_queue_mbufs(rxq);
603 if (err) {
604 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
605 return -ENOMEM;
606 }
607
608 /* Init the RX tail register. */
609 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
610
611 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
612 if (err) {
613 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
614 rx_queue_id);
615
616 rxq->rx_rel_mbufs(rxq);
617 ice_reset_rx_queue(rxq);
618 return -EINVAL;
619 }
620
621 dev->data->rx_queue_state[rx_queue_id] =
622 RTE_ETH_QUEUE_STATE_STARTED;
623
624 return 0;
625 }
626
627 int
ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
629 {
630 struct ice_rx_queue *rxq;
631 int err;
632 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
633
634 if (rx_queue_id < dev->data->nb_rx_queues) {
635 rxq = dev->data->rx_queues[rx_queue_id];
636
637 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
638 if (err) {
639 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
640 rx_queue_id);
641 return -EINVAL;
642 }
643 rxq->rx_rel_mbufs(rxq);
644 ice_reset_rx_queue(rxq);
645 dev->data->rx_queue_state[rx_queue_id] =
646 RTE_ETH_QUEUE_STATE_STOPPED;
647 }
648
649 return 0;
650 }
651
652 int
ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
654 {
655 struct ice_tx_queue *txq;
656 int err;
657 struct ice_vsi *vsi;
658 struct ice_hw *hw;
659 struct ice_aqc_add_tx_qgrp *txq_elem;
660 struct ice_tlan_ctx tx_ctx;
661 int buf_len;
662
663 PMD_INIT_FUNC_TRACE();
664
665 if (tx_queue_id >= dev->data->nb_tx_queues) {
666 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
667 tx_queue_id, dev->data->nb_tx_queues);
668 return -EINVAL;
669 }
670
671 txq = dev->data->tx_queues[tx_queue_id];
672 if (!txq || !txq->q_set) {
		PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
674 tx_queue_id);
675 return -EINVAL;
676 }
677
678 buf_len = ice_struct_size(txq_elem, txqs, 1);
679 txq_elem = ice_malloc(hw, buf_len);
680 if (!txq_elem)
681 return -ENOMEM;
682
683 vsi = txq->vsi;
684 hw = ICE_VSI_TO_HW(vsi);
685
686 memset(&tx_ctx, 0, sizeof(tx_ctx));
687 txq_elem->num_txqs = 1;
688 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
689
690 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
691 tx_ctx.qlen = txq->nb_tx_desc;
692 tx_ctx.pf_num = hw->pf_id;
693 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
694 tx_ctx.src_vsi = vsi->vsi_id;
695 tx_ctx.port_num = hw->port_info->lport;
696 tx_ctx.tso_ena = 1; /* tso enable */
697 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
698 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
699 tx_ctx.tsyn_ena = 1;
700
701 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
702 ice_tlan_ctx_info);
703
704 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
705
706 /* Init the Tx tail register*/
707 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
708
709 /* Fix me, we assume TC always 0 here */
710 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
711 txq_elem, buf_len, NULL);
712 if (err) {
713 PMD_DRV_LOG(ERR, "Failed to add lan txq");
714 rte_free(txq_elem);
715 return -EIO;
716 }
717 /* store the schedule node id */
718 txq->q_teid = txq_elem->txqs[0].q_teid;
719
720 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
721
722 rte_free(txq_elem);
723 return 0;
724 }
725
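/* Program the Flow Director Rx queue. It uses the legacy-1 descriptor format
 * and a fixed 1024-byte buffer since it only receives flow programming
 * status descriptors, not regular traffic.
 */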
726 static enum ice_status
ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
728 {
729 struct ice_vsi *vsi = rxq->vsi;
730 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
731 uint32_t rxdid = ICE_RXDID_LEGACY_1;
732 struct ice_rlan_ctx rx_ctx;
733 enum ice_status err;
734 uint32_t regval;
735
736 rxq->rx_hdr_len = 0;
737 rxq->rx_buf_len = 1024;
738
739 memset(&rx_ctx, 0, sizeof(rx_ctx));
740
741 rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
742 rx_ctx.qlen = rxq->nb_rx_desc;
743 rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
744 rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
745 rx_ctx.dtype = 0; /* No Header Split mode */
746 rx_ctx.dsize = 1; /* 32B descriptors */
747 rx_ctx.rxmax = ICE_ETH_MAX_LEN;
748 /* TPH: Transaction Layer Packet (TLP) processing hints */
749 rx_ctx.tphrdesc_ena = 1;
750 rx_ctx.tphwdesc_ena = 1;
751 rx_ctx.tphdata_ena = 1;
752 rx_ctx.tphhead_ena = 1;
	/* Low Receive Queue Threshold, defined in units of 64 descriptors.
	 * When the number of free descriptors goes below the lrxqthresh,
	 * an immediate interrupt is triggered.
	 */
757 rx_ctx.lrxqthresh = 2;
	/* Default: 32B descriptors; VLAN tag extracted to L2TAG2 (1st) */
759 rx_ctx.l2tsel = 1;
760 rx_ctx.showiv = 0;
761 rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
762
763 /* Enable Flexible Descriptors in the queue context which
764 * allows this driver to select a specific receive descriptor format
765 */
766 regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
767 QRXFLXP_CNTXT_RXDID_IDX_M;
768
	/* Increase context priority to pick up the profile ID;
	 * default is 0x01; setting to 0x03 ensures the profile
	 * is programmed even if the previous context has the same priority.
	 */
773 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
774 QRXFLXP_CNTXT_RXDID_PRIO_M;
775
776 ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
777
778 err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
779 if (err) {
780 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
781 rxq->queue_id);
782 return -EINVAL;
783 }
784 err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
785 if (err) {
786 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
787 rxq->queue_id);
788 return -EINVAL;
789 }
790
791 rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
792
793 /* Init the Rx tail register*/
794 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
795
796 return 0;
797 }
798
799 int
ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
801 {
802 struct ice_rx_queue *rxq;
803 int err;
804 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
805 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
806
807 PMD_INIT_FUNC_TRACE();
808
809 rxq = pf->fdir.rxq;
810 if (!rxq || !rxq->q_set) {
		PMD_DRV_LOG(ERR, "FDIR RX queue %u is not available or not set up",
812 rx_queue_id);
813 return -EINVAL;
814 }
815
816 err = ice_fdir_program_hw_rx_queue(rxq);
817 if (err) {
818 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
819 rx_queue_id);
820 return -EIO;
821 }
822
823 /* Init the RX tail register. */
824 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
825
826 err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
827 if (err) {
828 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
829 rx_queue_id);
830
831 ice_reset_rx_queue(rxq);
832 return -EINVAL;
833 }
834
835 return 0;
836 }
837
838 int
ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
840 {
841 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
842 struct ice_tx_queue *txq;
843 int err;
844 struct ice_vsi *vsi;
845 struct ice_hw *hw;
846 struct ice_aqc_add_tx_qgrp *txq_elem;
847 struct ice_tlan_ctx tx_ctx;
848 int buf_len;
849
850 PMD_INIT_FUNC_TRACE();
851
852 txq = pf->fdir.txq;
853 if (!txq || !txq->q_set) {
		PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or not set up",
855 tx_queue_id);
856 return -EINVAL;
857 }
858
859 buf_len = ice_struct_size(txq_elem, txqs, 1);
860 txq_elem = ice_malloc(hw, buf_len);
861 if (!txq_elem)
862 return -ENOMEM;
863
864 vsi = txq->vsi;
865 hw = ICE_VSI_TO_HW(vsi);
866
867 memset(&tx_ctx, 0, sizeof(tx_ctx));
868 txq_elem->num_txqs = 1;
869 txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
870
871 tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
872 tx_ctx.qlen = txq->nb_tx_desc;
873 tx_ctx.pf_num = hw->pf_id;
874 tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
875 tx_ctx.src_vsi = vsi->vsi_id;
876 tx_ctx.port_num = hw->port_info->lport;
877 tx_ctx.tso_ena = 1; /* tso enable */
878 tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
879 tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
880
881 ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
882 ice_tlan_ctx_info);
883
884 txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
885
886 /* Init the Tx tail register*/
887 ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
888
889 /* Fix me, we assume TC always 0 here */
890 err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
891 txq_elem, buf_len, NULL);
892 if (err) {
893 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
894 rte_free(txq_elem);
895 return -EIO;
896 }
897 /* store the schedule node id */
898 txq->q_teid = txq_elem->txqs[0].q_teid;
899
900 rte_free(txq_elem);
901 return 0;
902 }
903
904 /* Free all mbufs for descriptors in tx queue */
905 static void
_ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
907 {
908 uint16_t i;
909
910 if (!txq || !txq->sw_ring) {
911 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
912 return;
913 }
914
915 for (i = 0; i < txq->nb_tx_desc; i++) {
916 if (txq->sw_ring[i].mbuf) {
917 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
918 txq->sw_ring[i].mbuf = NULL;
919 }
920 }
921 }
922
923 static void
ice_reset_tx_queue(struct ice_tx_queue *txq)
925 {
926 struct ice_tx_entry *txe;
927 uint16_t i, prev, size;
928
929 if (!txq) {
930 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
931 return;
932 }
933
934 txe = txq->sw_ring;
935 size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
936 for (i = 0; i < size; i++)
937 ((volatile char *)txq->tx_ring)[i] = 0;
938
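	/* Mark every descriptor as done and chain the sw_ring entries so the
	 * cleanup logic sees a fully free ring after reset.
	 */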
939 prev = (uint16_t)(txq->nb_tx_desc - 1);
940 for (i = 0; i < txq->nb_tx_desc; i++) {
941 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
942
943 txd->cmd_type_offset_bsz =
944 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
945 txe[i].mbuf = NULL;
946 txe[i].last_id = i;
947 txe[prev].next_id = i;
948 prev = i;
949 }
950
951 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
952 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
953
954 txq->tx_tail = 0;
955 txq->nb_tx_used = 0;
956
957 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
958 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
959 }
960
961 int
ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
963 {
964 struct ice_tx_queue *txq;
965 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
966 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
967 struct ice_vsi *vsi = pf->main_vsi;
968 enum ice_status status;
969 uint16_t q_ids[1];
970 uint32_t q_teids[1];
971 uint16_t q_handle = tx_queue_id;
972
973 if (tx_queue_id >= dev->data->nb_tx_queues) {
974 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
975 tx_queue_id, dev->data->nb_tx_queues);
976 return -EINVAL;
977 }
978
979 txq = dev->data->tx_queues[tx_queue_id];
980 if (!txq) {
981 PMD_DRV_LOG(ERR, "TX queue %u is not available",
982 tx_queue_id);
983 return -EINVAL;
984 }
985
986 q_ids[0] = txq->reg_idx;
987 q_teids[0] = txq->q_teid;
988
989 /* Fix me, we assume TC always 0 here */
990 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
991 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
992 if (status != ICE_SUCCESS) {
993 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
994 return -EINVAL;
995 }
996
997 txq->tx_rel_mbufs(txq);
998 ice_reset_tx_queue(txq);
999 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1000
1001 return 0;
1002 }
1003
1004 int
ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1006 {
1007 struct ice_rx_queue *rxq;
1008 int err;
1009 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1010 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1011
1012 rxq = pf->fdir.rxq;
1013
1014 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
1015 if (err) {
1016 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
1017 rx_queue_id);
1018 return -EINVAL;
1019 }
1020 rxq->rx_rel_mbufs(rxq);
1021
1022 return 0;
1023 }
1024
1025 int
ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1027 {
1028 struct ice_tx_queue *txq;
1029 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1030 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1031 struct ice_vsi *vsi = pf->main_vsi;
1032 enum ice_status status;
1033 uint16_t q_ids[1];
1034 uint32_t q_teids[1];
1035 uint16_t q_handle = tx_queue_id;
1036
1037 txq = pf->fdir.txq;
1038 if (!txq) {
1039 PMD_DRV_LOG(ERR, "TX queue %u is not available",
1040 tx_queue_id);
1041 return -EINVAL;
1042 }
1043 vsi = txq->vsi;
1044
1045 q_ids[0] = txq->reg_idx;
1046 q_teids[0] = txq->q_teid;
1047
1048 /* Fix me, we assume TC always 0 here */
1049 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1050 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1051 if (status != ICE_SUCCESS) {
1052 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1053 return -EINVAL;
1054 }
1055
1056 txq->tx_rel_mbufs(txq);
1057
1058 return 0;
1059 }
1060
1061 int
ice_rx_queue_setup(struct rte_eth_dev *dev,
1063 uint16_t queue_idx,
1064 uint16_t nb_desc,
1065 unsigned int socket_id,
1066 const struct rte_eth_rxconf *rx_conf,
1067 struct rte_mempool *mp)
1068 {
1069 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1070 struct ice_adapter *ad =
1071 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1072 struct ice_vsi *vsi = pf->main_vsi;
1073 struct ice_rx_queue *rxq;
1074 const struct rte_memzone *rz;
1075 uint32_t ring_size;
1076 uint16_t len;
1077 int use_def_burst_func = 1;
1078 uint64_t offloads;
1079
1080 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1081 nb_desc > ICE_MAX_RING_DESC ||
1082 nb_desc < ICE_MIN_RING_DESC) {
1083 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1084 "invalid", nb_desc);
1085 return -EINVAL;
1086 }
1087
1088 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1089
1090 /* Free memory if needed */
1091 if (dev->data->rx_queues[queue_idx]) {
1092 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1093 dev->data->rx_queues[queue_idx] = NULL;
1094 }
1095
1096 /* Allocate the rx queue data structure */
1097 rxq = rte_zmalloc_socket(NULL,
1098 sizeof(struct ice_rx_queue),
1099 RTE_CACHE_LINE_SIZE,
1100 socket_id);
1101 if (!rxq) {
1102 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1103 "rx queue data structure");
1104 return -ENOMEM;
1105 }
1106 rxq->mp = mp;
1107 rxq->nb_rx_desc = nb_desc;
1108 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1109 rxq->queue_id = queue_idx;
1110 rxq->offloads = offloads;
1111
1112 rxq->reg_idx = vsi->base_queue + queue_idx;
1113 rxq->port_id = dev->data->port_id;
1114 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
1115 rxq->crc_len = RTE_ETHER_CRC_LEN;
1116 else
1117 rxq->crc_len = 0;
1118
1119 rxq->drop_en = rx_conf->rx_drop_en;
1120 rxq->vsi = vsi;
1121 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1122 rxq->proto_xtr = pf->proto_xtr != NULL ?
1123 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1124
1125 /* Allocate the maximum number of RX ring hardware descriptor. */
1126 len = ICE_MAX_RING_DESC;
1127
1128 /**
	 * Allocate a little more memory because the vectorized/bulk_alloc Rx
	 * functions don't check boundaries each time.
1131 */
1132 len += ICE_RX_MAX_BURST;
1133
1134 /* Allocate the maximum number of RX ring hardware descriptor. */
1135 ring_size = sizeof(union ice_rx_flex_desc) * len;
1136 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1137 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1138 ring_size, ICE_RING_BASE_ALIGN,
1139 socket_id);
1140 if (!rz) {
1141 ice_rx_queue_release(rxq);
1142 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1143 return -ENOMEM;
1144 }
1145
1146 rxq->mz = rz;
1147 /* Zero all the descriptors in the ring. */
1148 memset(rz->addr, 0, ring_size);
1149
1150 rxq->rx_ring_dma = rz->iova;
1151 rxq->rx_ring = rz->addr;
1152
1153 /* always reserve more for bulk alloc */
1154 len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1155
1156 /* Allocate the software ring. */
1157 rxq->sw_ring = rte_zmalloc_socket(NULL,
1158 sizeof(struct ice_rx_entry) * len,
1159 RTE_CACHE_LINE_SIZE,
1160 socket_id);
1161 if (!rxq->sw_ring) {
1162 ice_rx_queue_release(rxq);
1163 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1164 return -ENOMEM;
1165 }
1166
1167 ice_reset_rx_queue(rxq);
1168 rxq->q_set = true;
1169 dev->data->rx_queues[queue_idx] = rxq;
1170 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1171
1172 use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
1173
1174 if (!use_def_burst_func) {
1175 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1176 "satisfied. Rx Burst Bulk Alloc function will be "
1177 "used on port=%d, queue=%d.",
1178 rxq->port_id, rxq->queue_id);
1179 } else {
1180 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
			     "not satisfied, Scattered Rx is requested "
			     "on port=%d, queue=%d.",
1183 rxq->port_id, rxq->queue_id);
1184 ad->rx_bulk_alloc_allowed = false;
1185 }
1186
1187 return 0;
1188 }
1189
1190 void
ice_rx_queue_release(void *rxq)
1192 {
1193 struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1194
1195 if (!q) {
1196 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1197 return;
1198 }
1199
1200 q->rx_rel_mbufs(q);
1201 rte_free(q->sw_ring);
1202 rte_memzone_free(q->mz);
1203 rte_free(q);
1204 }
1205
1206 int
ice_tx_queue_setup(struct rte_eth_dev *dev,
1208 uint16_t queue_idx,
1209 uint16_t nb_desc,
1210 unsigned int socket_id,
1211 const struct rte_eth_txconf *tx_conf)
1212 {
1213 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1214 struct ice_vsi *vsi = pf->main_vsi;
1215 struct ice_tx_queue *txq;
1216 const struct rte_memzone *tz;
1217 uint32_t ring_size;
1218 uint16_t tx_rs_thresh, tx_free_thresh;
1219 uint64_t offloads;
1220
1221 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1222
1223 if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1224 nb_desc > ICE_MAX_RING_DESC ||
1225 nb_desc < ICE_MIN_RING_DESC) {
1226 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1227 "invalid", nb_desc);
1228 return -EINVAL;
1229 }
1230
1231 /**
1232 * The following two parameters control the setting of the RS bit on
1233 * transmit descriptors. TX descriptors will have their RS bit set
1234 * after txq->tx_rs_thresh descriptors have been used. The TX
1235 * descriptor ring will be cleaned after txq->tx_free_thresh
1236 * descriptors are used or if the number of descriptors required to
1237 * transmit a packet is greater than the number of free TX descriptors.
1238 *
1239 * The following constraints must be satisfied:
1240 * - tx_rs_thresh must be greater than 0.
1241 * - tx_rs_thresh must be less than the size of the ring minus 2.
1242 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
1243 * - tx_rs_thresh must be a divisor of the ring size.
1244 * - tx_free_thresh must be greater than 0.
1245 * - tx_free_thresh must be less than the size of the ring minus 3.
1246 * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1247 *
1248 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1249 * race condition, hence the maximum threshold constraints. When set
1250 * to zero use default values.
1251 */
1252 tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1253 tx_conf->tx_free_thresh :
1254 ICE_DEFAULT_TX_FREE_THRESH);
1255 /* force tx_rs_thresh to adapt an aggressive tx_free_thresh */
1256 tx_rs_thresh =
1257 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1258 nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1259 if (tx_conf->tx_rs_thresh)
1260 tx_rs_thresh = tx_conf->tx_rs_thresh;
1261 if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1262 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1263 "exceed nb_desc. (tx_rs_thresh=%u "
1264 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1265 (unsigned int)tx_rs_thresh,
1266 (unsigned int)tx_free_thresh,
1267 (unsigned int)nb_desc,
1268 (int)dev->data->port_id,
1269 (int)queue_idx);
1270 return -EINVAL;
1271 }
1272 if (tx_rs_thresh >= (nb_desc - 2)) {
1273 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1274 "number of TX descriptors minus 2. "
1275 "(tx_rs_thresh=%u port=%d queue=%d)",
1276 (unsigned int)tx_rs_thresh,
1277 (int)dev->data->port_id,
1278 (int)queue_idx);
1279 return -EINVAL;
1280 }
1281 if (tx_free_thresh >= (nb_desc - 3)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
1284 "number of TX descriptors minus 3. "
1285 "(tx_free_thresh=%u port=%d queue=%d)",
1286 (unsigned int)tx_free_thresh,
1287 (int)dev->data->port_id,
1288 (int)queue_idx);
1289 return -EINVAL;
1290 }
1291 if (tx_rs_thresh > tx_free_thresh) {
1292 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1293 "equal to tx_free_thresh. (tx_free_thresh=%u"
1294 " tx_rs_thresh=%u port=%d queue=%d)",
1295 (unsigned int)tx_free_thresh,
1296 (unsigned int)tx_rs_thresh,
1297 (int)dev->data->port_id,
1298 (int)queue_idx);
1299 return -EINVAL;
1300 }
1301 if ((nb_desc % tx_rs_thresh) != 0) {
1302 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1303 "number of TX descriptors. (tx_rs_thresh=%u"
1304 " port=%d queue=%d)",
1305 (unsigned int)tx_rs_thresh,
1306 (int)dev->data->port_id,
1307 (int)queue_idx);
1308 return -EINVAL;
1309 }
1310 if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1311 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1312 "tx_rs_thresh is greater than 1. "
1313 "(tx_rs_thresh=%u port=%d queue=%d)",
1314 (unsigned int)tx_rs_thresh,
1315 (int)dev->data->port_id,
1316 (int)queue_idx);
1317 return -EINVAL;
1318 }
1319
1320 /* Free memory if needed. */
1321 if (dev->data->tx_queues[queue_idx]) {
1322 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1323 dev->data->tx_queues[queue_idx] = NULL;
1324 }
1325
1326 /* Allocate the TX queue data structure. */
1327 txq = rte_zmalloc_socket(NULL,
1328 sizeof(struct ice_tx_queue),
1329 RTE_CACHE_LINE_SIZE,
1330 socket_id);
1331 if (!txq) {
1332 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1333 "tx queue structure");
1334 return -ENOMEM;
1335 }
1336
1337 /* Allocate TX hardware ring descriptors. */
1338 ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1339 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1340 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1341 ring_size, ICE_RING_BASE_ALIGN,
1342 socket_id);
1343 if (!tz) {
1344 ice_tx_queue_release(txq);
1345 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1346 return -ENOMEM;
1347 }
1348
1349 txq->mz = tz;
1350 txq->nb_tx_desc = nb_desc;
1351 txq->tx_rs_thresh = tx_rs_thresh;
1352 txq->tx_free_thresh = tx_free_thresh;
1353 txq->pthresh = tx_conf->tx_thresh.pthresh;
1354 txq->hthresh = tx_conf->tx_thresh.hthresh;
1355 txq->wthresh = tx_conf->tx_thresh.wthresh;
1356 txq->queue_id = queue_idx;
1357
1358 txq->reg_idx = vsi->base_queue + queue_idx;
1359 txq->port_id = dev->data->port_id;
1360 txq->offloads = offloads;
1361 txq->vsi = vsi;
1362 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1363
1364 txq->tx_ring_dma = tz->iova;
1365 txq->tx_ring = tz->addr;
1366
1367 /* Allocate software ring */
1368 txq->sw_ring =
1369 rte_zmalloc_socket(NULL,
1370 sizeof(struct ice_tx_entry) * nb_desc,
1371 RTE_CACHE_LINE_SIZE,
1372 socket_id);
1373 if (!txq->sw_ring) {
1374 ice_tx_queue_release(txq);
1375 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1376 return -ENOMEM;
1377 }
1378
1379 ice_reset_tx_queue(txq);
1380 txq->q_set = true;
1381 dev->data->tx_queues[queue_idx] = txq;
1382 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1383 ice_set_tx_function_flag(dev, txq);
1384
1385 return 0;
1386 }
1387
1388 void
ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1390 {
1391 ice_rx_queue_release(dev->data->rx_queues[qid]);
1392 }
1393
1394 void
ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1396 {
1397 ice_tx_queue_release(dev->data->tx_queues[qid]);
1398 }
1399
1400 void
ice_tx_queue_release(void *txq)
1402 {
1403 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1404
1405 if (!q) {
1406 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1407 return;
1408 }
1409
1410 q->tx_rel_mbufs(q);
1411 rte_free(q->sw_ring);
1412 rte_memzone_free(q->mz);
1413 rte_free(q);
1414 }
1415
1416 void
ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1418 struct rte_eth_rxq_info *qinfo)
1419 {
1420 struct ice_rx_queue *rxq;
1421
1422 rxq = dev->data->rx_queues[queue_id];
1423
1424 qinfo->mp = rxq->mp;
1425 qinfo->scattered_rx = dev->data->scattered_rx;
1426 qinfo->nb_desc = rxq->nb_rx_desc;
1427
1428 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1429 qinfo->conf.rx_drop_en = rxq->drop_en;
1430 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1431 }
1432
1433 void
ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1435 struct rte_eth_txq_info *qinfo)
1436 {
1437 struct ice_tx_queue *txq;
1438
1439 txq = dev->data->tx_queues[queue_id];
1440
1441 qinfo->nb_desc = txq->nb_tx_desc;
1442
1443 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1444 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1445 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1446
1447 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1448 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1449 qinfo->conf.offloads = txq->offloads;
1450 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1451 }
1452
1453 uint32_t
ice_rx_queue_count(void *rx_queue)
1455 {
1456 #define ICE_RXQ_SCAN_INTERVAL 4
1457 volatile union ice_rx_flex_desc *rxdp;
1458 struct ice_rx_queue *rxq;
1459 uint16_t desc = 0;
1460
1461 rxq = rx_queue;
1462 rxdp = &rxq->rx_ring[rxq->rx_tail];
1463 while ((desc < rxq->nb_rx_desc) &&
1464 rte_le_to_cpu_16(rxdp->wb.status_error0) &
1465 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1466 /**
		 * Check the DD bit of one Rx descriptor in each group of 4,
		 * to avoid checking too frequently and degrading performance
		 * too much.
1470 */
1471 desc += ICE_RXQ_SCAN_INTERVAL;
1472 rxdp += ICE_RXQ_SCAN_INTERVAL;
1473 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1474 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1475 desc - rxq->nb_rx_desc]);
1476 }
1477
1478 return desc;
1479 }
1480
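/* Any of these status/error bits indicates a checksum or header problem; a
 * descriptor with none of them set lets the fast path report both IP and L4
 * checksums as good at once.
 */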
1481 #define ICE_RX_FLEX_ERR0_BITS \
1482 ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
1483 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1484 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1485 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1486 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1487 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1488
1489 /* Rx L3/L4 checksum */
1490 static inline uint64_t
ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1492 {
1493 uint64_t flags = 0;
1494
1495 /* check if HW has decoded the packet and checksum */
1496 if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1497 return 0;
1498
1499 if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1500 flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
1501 return flags;
1502 }
1503
1504 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1505 flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1506 else
1507 flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1508
1509 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1510 flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1511 else
1512 flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1513
1514 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1515 flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1516
1517 if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
1518 flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
1519 else
1520 flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
1521
1522 return flags;
1523 }
1524
1525 static inline void
ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1527 {
1528 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1529 (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1530 mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1531 mb->vlan_tci =
1532 rte_le_to_cpu_16(rxdp->wb.l2tag1);
1533 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1534 rte_le_to_cpu_16(rxdp->wb.l2tag1));
1535 } else {
1536 mb->vlan_tci = 0;
1537 }
1538
1539 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1540 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1541 (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1542 mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
1543 RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
1544 mb->vlan_tci_outer = mb->vlan_tci;
1545 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1546 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1547 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1548 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1549 } else {
1550 mb->vlan_tci_outer = 0;
1551 }
1552 #endif
1553 PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1554 mb->vlan_tci, mb->vlan_tci_outer);
1555 }
1556
1557 #define ICE_LOOK_AHEAD 8
1558 #if (ICE_LOOK_AHEAD != 8)
1559 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1560 #endif
1561
1562 #define ICE_PTP_TS_VALID 0x1
1563
1564 static inline int
ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1566 {
1567 volatile union ice_rx_flex_desc *rxdp;
1568 struct ice_rx_entry *rxep;
1569 struct rte_mbuf *mb;
1570 uint16_t stat_err0;
1571 uint16_t pkt_len;
1572 int32_t s[ICE_LOOK_AHEAD], nb_dd;
1573 int32_t i, j, nb_rx = 0;
1574 uint64_t pkt_flags = 0;
1575 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1576 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1577 bool is_tsinit = false;
1578 uint64_t ts_ns;
1579 struct ice_vsi *vsi = rxq->vsi;
1580 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1581 struct ice_adapter *ad = rxq->vsi->adapter;
1582 #endif
1583 rxdp = &rxq->rx_ring[rxq->rx_tail];
1584 rxep = &rxq->sw_ring[rxq->rx_tail];
1585
1586 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1587
1588 /* Make sure there is at least 1 packet to receive */
1589 if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1590 return 0;
1591
1592 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
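	/* Resample the PHC time if the cached value is more than a few
	 * milliseconds old, before extending 32-bit Rx timestamps to 64 bits.
	 */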
1593 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1594 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1595
1596 if (unlikely(sw_cur_time - ad->hw_time_update > 4))
			is_tsinit = true;
1598 }
1599 #endif
1600
1601 /**
1602 * Scan LOOK_AHEAD descriptors at a time to determine which
1603 * descriptors reference packets that are ready to be received.
1604 */
1605 for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1606 rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1607 /* Read desc statuses backwards to avoid race condition */
1608 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1609 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1610
1611 rte_smp_rmb();
1612
1613 /* Compute how many status bits were set */
1614 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1615 nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1616
1617 nb_rx += nb_dd;
1618
1619 /* Translate descriptor info to mbuf parameters */
1620 for (j = 0; j < nb_dd; j++) {
1621 mb = rxep[j].mbuf;
1622 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1623 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1624 mb->data_len = pkt_len;
1625 mb->pkt_len = pkt_len;
1626 mb->ol_flags = 0;
1627 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1628 pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1629 mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1630 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1631 ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1632 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
1633 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1634 if (ice_timestamp_dynflag > 0) {
1635 rxq->time_high =
1636 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
1637 if (unlikely(is_tsinit)) {
1638 ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1,
1639 rxq->time_high);
1640 ad->hw_time_low = (uint32_t)ts_ns;
1641 ad->hw_time_high = (uint32_t)(ts_ns >> 32);
1642 is_tsinit = false;
1643 } else {
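					/* The 32-bit HW timestamp wrapped:
					 * bump the software-maintained
					 * high word.
					 */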
1644 if (rxq->time_high < ad->hw_time_low)
1645 ad->hw_time_high += 1;
1646 ts_ns = (uint64_t)ad->hw_time_high << 32 | rxq->time_high;
1647 ad->hw_time_low = rxq->time_high;
1648 }
1649 ad->hw_time_update = rte_get_timer_cycles() /
1650 (rte_get_timer_hz() / 1000);
1651 *RTE_MBUF_DYNFIELD(mb,
1652 ice_timestamp_dynfield_offset,
1653 rte_mbuf_timestamp_t *) = ts_ns;
1654 pkt_flags |= ice_timestamp_dynflag;
1655 }
1656
1657 if (ad->ptp_ena && ((mb->packet_type &
1658 RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
1659 rxq->time_high =
1660 rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
1661 mb->timesync = rxq->queue_id;
1662 pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
1663 if (rxdp[j].wb.time_stamp_low &
1664 ICE_PTP_TS_VALID)
1665 pkt_flags |=
1666 RTE_MBUF_F_RX_IEEE1588_TMST;
1667 }
1668 #endif
1669 mb->ol_flags |= pkt_flags;
1670 }
1671
1672 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1673 rxq->rx_stage[i + j] = rxep[j].mbuf;
1674
1675 if (nb_dd != ICE_LOOK_AHEAD)
1676 break;
1677 }
1678
1679 /* Clear software ring entries */
1680 for (i = 0; i < nb_rx; i++)
1681 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1682
1683 PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1684 "port_id=%u, queue_id=%u, nb_rx=%d",
1685 rxq->port_id, rxq->queue_id, nb_rx);
1686
1687 return nb_rx;
1688 }
1689
1690 static inline uint16_t
ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1692 struct rte_mbuf **rx_pkts,
1693 uint16_t nb_pkts)
1694 {
1695 uint16_t i;
1696 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1697
1698 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1699
1700 for (i = 0; i < nb_pkts; i++)
1701 rx_pkts[i] = stage[i];
1702
1703 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1704 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1705
1706 return nb_pkts;
1707 }
1708
1709 static inline int
ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1711 {
1712 volatile union ice_rx_flex_desc *rxdp;
1713 struct ice_rx_entry *rxep;
1714 struct rte_mbuf *mb;
1715 uint16_t alloc_idx, i;
1716 uint64_t dma_addr;
1717 int diag;
1718
1719 /* Allocate buffers in bulk */
1720 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1721 (rxq->rx_free_thresh - 1));
1722 rxep = &rxq->sw_ring[alloc_idx];
1723 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1724 rxq->rx_free_thresh);
1725 if (unlikely(diag != 0)) {
1726 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1727 return -ENOMEM;
1728 }
1729
1730 rxdp = &rxq->rx_ring[alloc_idx];
1731 for (i = 0; i < rxq->rx_free_thresh; i++) {
1732 if (likely(i < (rxq->rx_free_thresh - 1)))
1733 /* Prefetch next mbuf */
1734 rte_prefetch0(rxep[i + 1].mbuf);
1735
1736 mb = rxep[i].mbuf;
1737 rte_mbuf_refcnt_set(mb, 1);
1738 mb->next = NULL;
1739 mb->data_off = RTE_PKTMBUF_HEADROOM;
1740 mb->nb_segs = 1;
1741 mb->port = rxq->port_id;
1742 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1743 rxdp[i].read.hdr_addr = 0;
1744 rxdp[i].read.pkt_addr = dma_addr;
1745 }
1746
1747 /* Update Rx tail register */
1748 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1749
1750 rxq->rx_free_trigger =
1751 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1752 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1753 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1754
1755 return 0;
1756 }
1757
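/* Bulk-alloc Rx burst: descriptors are first scanned from the HW ring into
 * rx_stage[], packets are then handed out from that stage, and fresh buffers
 * are allocated in rx_free_thresh-sized chunks once the free trigger is
 * crossed.
 */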
1758 static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1760 {
1761 struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1762 uint16_t nb_rx = 0;
1763
1764 if (!nb_pkts)
1765 return 0;
1766
1767 if (rxq->rx_nb_avail)
1768 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1769
1770 nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1771 rxq->rx_next_avail = 0;
1772 rxq->rx_nb_avail = nb_rx;
1773 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1774
1775 if (rxq->rx_tail > rxq->rx_free_trigger) {
1776 if (ice_rx_alloc_bufs(rxq) != 0) {
1777 uint16_t i, j;
1778
1779 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
1780 rxq->rx_free_thresh;
1781 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1782 "port_id=%u, queue_id=%u",
1783 rxq->port_id, rxq->queue_id);
1784 rxq->rx_nb_avail = 0;
1785 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1786 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1787 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1788
1789 return 0;
1790 }
1791 }
1792
1793 if (rxq->rx_tail >= rxq->nb_rx_desc)
1794 rxq->rx_tail = 0;
1795
1796 if (rxq->rx_nb_avail)
1797 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1798
1799 return 0;
1800 }
1801
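/* Bulk-allocation Rx entry point: splits requests larger than
 * ICE_RX_MAX_BURST into smaller chunks handled by rx_recv_pkts().
 */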
1802 static uint16_t
1803 ice_recv_pkts_bulk_alloc(void *rx_queue,
1804 struct rte_mbuf **rx_pkts,
1805 uint16_t nb_pkts)
1806 {
1807 uint16_t nb_rx = 0;
1808 uint16_t n;
1809 uint16_t count;
1810
1811 if (unlikely(nb_pkts == 0))
1812 return nb_rx;
1813
1814 if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1815 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1816
1817 while (nb_pkts) {
1818 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1819 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1820 nb_rx = (uint16_t)(nb_rx + count);
1821 nb_pkts = (uint16_t)(nb_pkts - count);
1822 if (count < n)
1823 break;
1824 }
1825
1826 return nb_rx;
1827 }
1828
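/* Scalar scattered Rx path: chains multi-descriptor packets into an mbuf
 * segment list, strips the CRC when needed and fills offload metadata
 * (VLAN, RSS/FDIR, timestamp/PTP) for the first segment.
 */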
1829 static uint16_t
1830 ice_recv_scattered_pkts(void *rx_queue,
1831 struct rte_mbuf **rx_pkts,
1832 uint16_t nb_pkts)
1833 {
1834 struct ice_rx_queue *rxq = rx_queue;
1835 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1836 volatile union ice_rx_flex_desc *rxdp;
1837 union ice_rx_flex_desc rxd;
1838 struct ice_rx_entry *sw_ring = rxq->sw_ring;
1839 struct ice_rx_entry *rxe;
1840 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1841 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1842 struct rte_mbuf *nmb; /* new allocated mbuf */
1843 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1844 uint16_t rx_id = rxq->rx_tail;
1845 uint16_t nb_rx = 0;
1846 uint16_t nb_hold = 0;
1847 uint16_t rx_packet_len;
1848 uint16_t rx_stat_err0;
1849 uint64_t dma_addr;
1850 uint64_t pkt_flags;
1851 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1852 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1853 bool is_tsinit = false;
1854 uint64_t ts_ns;
1855 struct ice_vsi *vsi = rxq->vsi;
1856 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1857 struct ice_adapter *ad = rxq->vsi->adapter;
1858
1859 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1860 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1861
1862 if (unlikely(sw_cur_time - ad->hw_time_update > 4))
1863 is_tsinit = true;
1864 }
1865 #endif
1866
1867 while (nb_rx < nb_pkts) {
1868 rxdp = &rx_ring[rx_id];
1869 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1870
1871 /* Check the DD bit first */
1872 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1873 break;
1874
1875 /* allocate mbuf */
1876 nmb = rte_mbuf_raw_alloc(rxq->mp);
1877 if (unlikely(!nmb)) {
1878 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
1879 break;
1880 }
1881 rxd = *rxdp; /* copy descriptor in ring to temp variable */
1882
1883 nb_hold++;
1884 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1885 rx_id++;
1886 if (unlikely(rx_id == rxq->nb_rx_desc))
1887 rx_id = 0;
1888
1889 /* Prefetch next mbuf */
1890 rte_prefetch0(sw_ring[rx_id].mbuf);
1891
1892 /**
1893 * When next RX descriptor is on a cache line boundary,
1894 * prefetch the next 4 RX descriptors and next 8 pointers
1895 * to mbufs.
1896 */
1897 if ((rx_id & 0x3) == 0) {
1898 rte_prefetch0(&rx_ring[rx_id]);
1899 rte_prefetch0(&sw_ring[rx_id]);
1900 }
1901
1902 rxm = rxe->mbuf;
1903 rxe->mbuf = nmb;
1904 dma_addr =
1905 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1906
1907 /* Set data buffer address and data length of the mbuf */
1908 rxdp->read.hdr_addr = 0;
1909 rxdp->read.pkt_addr = dma_addr;
1910 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1911 ICE_RX_FLX_DESC_PKT_LEN_M;
1912 rxm->data_len = rx_packet_len;
1913 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1914
1915 /**
1916 * If this is the first buffer of the received packet, set the
1917 * pointer to the first mbuf of the packet and initialize its
1918 * context. Otherwise, update the total length and the number
1919 * of segments of the current scattered packet, and update the
1920 * pointer to the last mbuf of the current packet.
1921 */
1922 if (!first_seg) {
1923 first_seg = rxm;
1924 first_seg->nb_segs = 1;
1925 first_seg->pkt_len = rx_packet_len;
1926 } else {
1927 first_seg->pkt_len =
1928 (uint16_t)(first_seg->pkt_len +
1929 rx_packet_len);
1930 first_seg->nb_segs++;
1931 last_seg->next = rxm;
1932 }
1933
1934 /**
1935 * If this is not the last buffer of the received packet,
1936 * update the pointer to the last mbuf of the current scattered
1937 * packet and continue to parse the RX ring.
1938 */
1939 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1940 last_seg = rxm;
1941 continue;
1942 }
1943
1944 /**
1945 * This is the last buffer of the received packet. If the CRC
1946 * is not stripped by the hardware:
1947 * - Subtract the CRC length from the total packet length.
1948 * - If the last buffer only contains the whole CRC or a part
1949 * of it, free the mbuf associated with the last buffer. If part
1950 * of the CRC is also contained in the previous mbuf, subtract
1951 * the length of that CRC part from the data length of the
1952 * previous mbuf.
1953 */
1954 rxm->next = NULL;
1955 if (unlikely(rxq->crc_len > 0)) {
1956 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1957 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1958 rte_pktmbuf_free_seg(rxm);
1959 first_seg->nb_segs--;
1960 last_seg->data_len =
1961 (uint16_t)(last_seg->data_len -
1962 (RTE_ETHER_CRC_LEN - rx_packet_len));
1963 last_seg->next = NULL;
1964 } else
1965 rxm->data_len = (uint16_t)(rx_packet_len -
1966 RTE_ETHER_CRC_LEN);
1967 }
1968
1969 first_seg->port = rxq->port_id;
1970 first_seg->ol_flags = 0;
1971 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1972 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1973 ice_rxd_to_vlan_tci(first_seg, &rxd);
1974 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
1975 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1976 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1977 if (ice_timestamp_dynflag > 0) {
1978 rxq->time_high =
1979 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
1980 if (unlikely(is_tsinit)) {
1981 ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
1982 ad->hw_time_low = (uint32_t)ts_ns;
1983 ad->hw_time_high = (uint32_t)(ts_ns >> 32);
1984 is_tsinit = false;
1985 } else {
1986 if (rxq->time_high < ad->hw_time_low)
1987 ad->hw_time_high += 1;
1988 ts_ns = (uint64_t)ad->hw_time_high << 32 | rxq->time_high;
1989 ad->hw_time_low = rxq->time_high;
1990 }
1991 ad->hw_time_update = rte_get_timer_cycles() /
1992 (rte_get_timer_hz() / 1000);
1993 *RTE_MBUF_DYNFIELD(rxm,
1994 (ice_timestamp_dynfield_offset),
1995 rte_mbuf_timestamp_t *) = ts_ns;
1996 pkt_flags |= ice_timestamp_dynflag;
1997 }
1998
1999 if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
2000 == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
2001 rxq->time_high =
2002 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2003 first_seg->timesync = rxq->queue_id;
2004 pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
2005 }
2006 #endif
2007 first_seg->ol_flags |= pkt_flags;
2008 /* Prefetch data of first segment, if configured to do so. */
2009 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
2010 first_seg->data_off));
2011 rx_pkts[nb_rx++] = first_seg;
2012 first_seg = NULL;
2013 }
2014
2015 /* Record index of the next RX descriptor to probe. */
2016 rxq->rx_tail = rx_id;
2017 rxq->pkt_first_seg = first_seg;
2018 rxq->pkt_last_seg = last_seg;
2019
2020 /**
2021 * If the number of free RX descriptors is greater than the RX free
2022 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2023 * register. Update the RDT with the value of the last processed RX
2024 * descriptor minus 1, to guarantee that the RDT register is never
2025 * equal to the RDH register, which creates a "full" ring situation
2026 * from the hardware point of view.
2027 */
2028 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2029 if (nb_hold > rxq->rx_free_thresh) {
2030 rx_id = (uint16_t)(rx_id == 0 ?
2031 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2032 /* write TAIL register */
2033 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2034 nb_hold = 0;
2035 }
2036 rxq->nb_rx_hold = nb_hold;
2037
2038 /* return the number of received packets in the burst */
2039 return nb_rx;
2040 }
2041
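/* Return the packet type list matching the active DDP package (OS vs.
 * COMMS) when a known Rx burst function is in use, otherwise NULL.
 */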
2042 const uint32_t *
2043 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2044 {
2045 struct ice_adapter *ad =
2046 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2047 const uint32_t *ptypes;
2048
2049 static const uint32_t ptypes_os[] = {
2050 /* refers to ice_get_default_pkt_type() */
2051 RTE_PTYPE_L2_ETHER,
2052 RTE_PTYPE_L2_ETHER_TIMESYNC,
2053 RTE_PTYPE_L2_ETHER_LLDP,
2054 RTE_PTYPE_L2_ETHER_ARP,
2055 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2056 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2057 RTE_PTYPE_L4_FRAG,
2058 RTE_PTYPE_L4_ICMP,
2059 RTE_PTYPE_L4_NONFRAG,
2060 RTE_PTYPE_L4_SCTP,
2061 RTE_PTYPE_L4_TCP,
2062 RTE_PTYPE_L4_UDP,
2063 RTE_PTYPE_TUNNEL_GRENAT,
2064 RTE_PTYPE_TUNNEL_IP,
2065 RTE_PTYPE_INNER_L2_ETHER,
2066 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2067 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2068 RTE_PTYPE_INNER_L4_FRAG,
2069 RTE_PTYPE_INNER_L4_ICMP,
2070 RTE_PTYPE_INNER_L4_NONFRAG,
2071 RTE_PTYPE_INNER_L4_SCTP,
2072 RTE_PTYPE_INNER_L4_TCP,
2073 RTE_PTYPE_INNER_L4_UDP,
2074 RTE_PTYPE_UNKNOWN
2075 };
2076
2077 static const uint32_t ptypes_comms[] = {
2078 /* refers to ice_get_default_pkt_type() */
2079 RTE_PTYPE_L2_ETHER,
2080 RTE_PTYPE_L2_ETHER_TIMESYNC,
2081 RTE_PTYPE_L2_ETHER_LLDP,
2082 RTE_PTYPE_L2_ETHER_ARP,
2083 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2084 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2085 RTE_PTYPE_L4_FRAG,
2086 RTE_PTYPE_L4_ICMP,
2087 RTE_PTYPE_L4_NONFRAG,
2088 RTE_PTYPE_L4_SCTP,
2089 RTE_PTYPE_L4_TCP,
2090 RTE_PTYPE_L4_UDP,
2091 RTE_PTYPE_TUNNEL_GRENAT,
2092 RTE_PTYPE_TUNNEL_IP,
2093 RTE_PTYPE_INNER_L2_ETHER,
2094 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2095 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2096 RTE_PTYPE_INNER_L4_FRAG,
2097 RTE_PTYPE_INNER_L4_ICMP,
2098 RTE_PTYPE_INNER_L4_NONFRAG,
2099 RTE_PTYPE_INNER_L4_SCTP,
2100 RTE_PTYPE_INNER_L4_TCP,
2101 RTE_PTYPE_INNER_L4_UDP,
2102 RTE_PTYPE_TUNNEL_GTPC,
2103 RTE_PTYPE_TUNNEL_GTPU,
2104 RTE_PTYPE_L2_ETHER_PPPOE,
2105 RTE_PTYPE_UNKNOWN
2106 };
2107
2108 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
2109 ptypes = ptypes_comms;
2110 else
2111 ptypes = ptypes_os;
2112
2113 if (dev->rx_pkt_burst == ice_recv_pkts ||
2114 dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
2115 dev->rx_pkt_burst == ice_recv_scattered_pkts)
2116 return ptypes;
2117
2118 #ifdef RTE_ARCH_X86
2119 if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
2120 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
2121 #ifdef CC_AVX512_SUPPORT
2122 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
2123 dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
2124 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
2125 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
2126 #endif
2127 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
2128 dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
2129 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
2130 dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
2131 return ptypes;
2132 #endif
2133
2134 return NULL;
2135 }
2136
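/* Report the state (DONE/AVAIL/UNAVAIL) of the Rx descriptor located
 * 'offset' entries past the current queue tail.
 */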
2137 int
2138 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
2139 {
2140 volatile union ice_rx_flex_desc *rxdp;
2141 struct ice_rx_queue *rxq = rx_queue;
2142 uint32_t desc;
2143
2144 if (unlikely(offset >= rxq->nb_rx_desc))
2145 return -EINVAL;
2146
2147 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2148 return RTE_ETH_RX_DESC_UNAVAIL;
2149
2150 desc = rxq->rx_tail + offset;
2151 if (desc >= rxq->nb_rx_desc)
2152 desc -= rxq->nb_rx_desc;
2153
2154 rxdp = &rxq->rx_ring[desc];
2155 if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
2156 (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
2157 return RTE_ETH_RX_DESC_DONE;
2158
2159 return RTE_ETH_RX_DESC_AVAIL;
2160 }
2161
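/* Report whether the Tx descriptor 'offset' entries past the tail has been
 * completed; completion is tracked at tx_rs_thresh granularity, so the
 * check is done on the next descriptor carrying the RS bit.
 */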
2162 int
2163 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
2164 {
2165 struct ice_tx_queue *txq = tx_queue;
2166 volatile uint64_t *status;
2167 uint64_t mask, expect;
2168 uint32_t desc;
2169
2170 if (unlikely(offset >= txq->nb_tx_desc))
2171 return -EINVAL;
2172
2173 desc = txq->tx_tail + offset;
2174 /* go to next desc that has the RS bit */
2175 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2176 txq->tx_rs_thresh;
2177 if (desc >= txq->nb_tx_desc) {
2178 desc -= txq->nb_tx_desc;
2179 if (desc >= txq->nb_tx_desc)
2180 desc -= txq->nb_tx_desc;
2181 }
2182
2183 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2184 mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2185 expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2186 ICE_TXD_QW1_DTYPE_S);
2187 if ((*status & mask) == expect)
2188 return RTE_ETH_TX_DESC_DONE;
2189
2190 return RTE_ETH_TX_DESC_FULL;
2191 }
2192
2193 void
2194 ice_free_queues(struct rte_eth_dev *dev)
2195 {
2196 uint16_t i;
2197
2198 PMD_INIT_FUNC_TRACE();
2199
2200 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2201 if (!dev->data->rx_queues[i])
2202 continue;
2203 ice_rx_queue_release(dev->data->rx_queues[i]);
2204 dev->data->rx_queues[i] = NULL;
2205 }
2206 dev->data->nb_rx_queues = 0;
2207
2208 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2209 if (!dev->data->tx_queues[i])
2210 continue;
2211 ice_tx_queue_release(dev->data->tx_queues[i]);
2212 dev->data->tx_queues[i] = NULL;
2213 }
2214 dev->data->nb_tx_queues = 0;
2215 }
2216
2217 #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC
2218 #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC
2219
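/* Allocate the Tx queue structure and DMA descriptor ring used by the
 * flow director programming queue.
 */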
2220 int
2221 ice_fdir_setup_tx_resources(struct ice_pf *pf)
2222 {
2223 struct ice_tx_queue *txq;
2224 const struct rte_memzone *tz = NULL;
2225 uint32_t ring_size;
2226 struct rte_eth_dev *dev;
2227
2228 if (!pf) {
2229 PMD_DRV_LOG(ERR, "PF is not available");
2230 return -EINVAL;
2231 }
2232
2233 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2234
2235 /* Allocate the TX queue data structure. */
2236 txq = rte_zmalloc_socket("ice fdir tx queue",
2237 sizeof(struct ice_tx_queue),
2238 RTE_CACHE_LINE_SIZE,
2239 SOCKET_ID_ANY);
2240 if (!txq) {
2241 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2242 "tx queue structure.");
2243 return -ENOMEM;
2244 }
2245
2246 /* Allocate TX hardware ring descriptors. */
2247 ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2248 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2249
2250 tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2251 ICE_FDIR_QUEUE_ID, ring_size,
2252 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2253 if (!tz) {
2254 ice_tx_queue_release(txq);
2255 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2256 return -ENOMEM;
2257 }
2258
2259 txq->mz = tz;
2260 txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2261 txq->queue_id = ICE_FDIR_QUEUE_ID;
2262 txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2263 txq->vsi = pf->fdir.fdir_vsi;
2264
2265 txq->tx_ring_dma = tz->iova;
2266 txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2267 /*
2268 * No software ring allocation or reset is needed for the fdir
2269 * program queue; just mark the queue as configured.
2270 */
2271 txq->q_set = true;
2272 pf->fdir.txq = txq;
2273
2274 txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2275
2276 return ICE_SUCCESS;
2277 }
2278
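/* Allocate the Rx queue structure and DMA descriptor ring used by the
 * flow director programming queue.
 */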
2279 int
2280 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2281 {
2282 struct ice_rx_queue *rxq;
2283 const struct rte_memzone *rz = NULL;
2284 uint32_t ring_size;
2285 struct rte_eth_dev *dev;
2286
2287 if (!pf) {
2288 PMD_DRV_LOG(ERR, "PF is not available");
2289 return -EINVAL;
2290 }
2291
2292 dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2293
2294 /* Allocate the RX queue data structure. */
2295 rxq = rte_zmalloc_socket("ice fdir rx queue",
2296 sizeof(struct ice_rx_queue),
2297 RTE_CACHE_LINE_SIZE,
2298 SOCKET_ID_ANY);
2299 if (!rxq) {
2300 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2301 "rx queue structure.");
2302 return -ENOMEM;
2303 }
2304
2305 /* Allocate RX hardware ring descriptors. */
2306 ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2307 ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2308
2309 rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2310 ICE_FDIR_QUEUE_ID, ring_size,
2311 ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2312 if (!rz) {
2313 ice_rx_queue_release(rxq);
2314 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2315 return -ENOMEM;
2316 }
2317
2318 rxq->mz = rz;
2319 rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2320 rxq->queue_id = ICE_FDIR_QUEUE_ID;
2321 rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2322 rxq->vsi = pf->fdir.fdir_vsi;
2323
2324 rxq->rx_ring_dma = rz->iova;
2325 memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2326 sizeof(union ice_32byte_rx_desc));
2327 rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2328
2329 /*
2330 * No software ring allocation or reset is needed for the fdir
2331 * rx queue; just mark the queue as configured.
2332 */
2333 rxq->q_set = true;
2334 pf->fdir.rxq = rxq;
2335
2336 rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2337
2338 return ICE_SUCCESS;
2339 }
2340
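/* Scalar single-segment Rx path: one descriptor per packet, with VLAN,
 * RSS/FDIR and optional timestamp/PTP metadata extraction.
 */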
2341 uint16_t
2342 ice_recv_pkts(void *rx_queue,
2343 struct rte_mbuf **rx_pkts,
2344 uint16_t nb_pkts)
2345 {
2346 struct ice_rx_queue *rxq = rx_queue;
2347 volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2348 volatile union ice_rx_flex_desc *rxdp;
2349 union ice_rx_flex_desc rxd;
2350 struct ice_rx_entry *sw_ring = rxq->sw_ring;
2351 struct ice_rx_entry *rxe;
2352 struct rte_mbuf *nmb; /* new allocated mbuf */
2353 struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2354 uint16_t rx_id = rxq->rx_tail;
2355 uint16_t nb_rx = 0;
2356 uint16_t nb_hold = 0;
2357 uint16_t rx_packet_len;
2358 uint16_t rx_stat_err0;
2359 uint64_t dma_addr;
2360 uint64_t pkt_flags;
2361 uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2362 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2363 bool is_tsinit = false;
2364 uint64_t ts_ns;
2365 struct ice_vsi *vsi = rxq->vsi;
2366 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2367 struct ice_adapter *ad = rxq->vsi->adapter;
2368
2369 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
2370 uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
2371
2372 if (unlikely(sw_cur_time - ad->hw_time_update > 4))
2373 is_tsinit = true;
2374 }
2375 #endif
2376
2377 while (nb_rx < nb_pkts) {
2378 rxdp = &rx_ring[rx_id];
2379 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2380
2381 /* Check the DD bit first */
2382 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2383 break;
2384
2385 /* allocate mbuf */
2386 nmb = rte_mbuf_raw_alloc(rxq->mp);
2387 if (unlikely(!nmb)) {
2388 rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2389 break;
2390 }
2391 rxd = *rxdp; /* copy descriptor in ring to temp variable */
2392
2393 nb_hold++;
2394 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2395 rx_id++;
2396 if (unlikely(rx_id == rxq->nb_rx_desc))
2397 rx_id = 0;
2398 rxm = rxe->mbuf;
2399 rxe->mbuf = nmb;
2400 dma_addr =
2401 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2402
2403 /**
2404 * fill the read format of the descriptor with the physical
2405 * address of the newly allocated mbuf: nmb
2406 */
2407 rxdp->read.hdr_addr = 0;
2408 rxdp->read.pkt_addr = dma_addr;
2409
2410 /* calculate rx_packet_len of the received pkt */
2411 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2412 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2413
2414 /* fill old mbuf with received descriptor: rxd */
2415 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2416 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2417 rxm->nb_segs = 1;
2418 rxm->next = NULL;
2419 rxm->pkt_len = rx_packet_len;
2420 rxm->data_len = rx_packet_len;
2421 rxm->port = rxq->port_id;
2422 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2423 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2424 ice_rxd_to_vlan_tci(rxm, &rxd);
2425 rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
2426 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2427 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2428 if (ice_timestamp_dynflag > 0) {
2429 rxq->time_high =
2430 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2431 if (unlikely(is_tsinit)) {
2432 ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
2433 ad->hw_time_low = (uint32_t)ts_ns;
2434 ad->hw_time_high = (uint32_t)(ts_ns >> 32);
2435 is_tsinit = false;
2436 } else {
2437 if (rxq->time_high < ad->hw_time_low)
2438 ad->hw_time_high += 1;
2439 ts_ns = (uint64_t)ad->hw_time_high << 32 | rxq->time_high;
2440 ad->hw_time_low = rxq->time_high;
2441 }
2442 ad->hw_time_update = rte_get_timer_cycles() /
2443 (rte_get_timer_hz() / 1000);
2444 *RTE_MBUF_DYNFIELD(rxm,
2445 (ice_timestamp_dynfield_offset),
2446 rte_mbuf_timestamp_t *) = ts_ns;
2447 pkt_flags |= ice_timestamp_dynflag;
2448 }
2449
2450 if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
2451 RTE_PTYPE_L2_ETHER_TIMESYNC)) {
2452 rxq->time_high =
2453 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2454 rxm->timesync = rxq->queue_id;
2455 pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
2456 }
2457 #endif
2458 rxm->ol_flags |= pkt_flags;
2459 /* copy old mbuf to rx_pkts */
2460 rx_pkts[nb_rx++] = rxm;
2461 }
2462
2463 rxq->rx_tail = rx_id;
2464 /**
2465 * If the number of free RX descriptors is greater than the RX free
2466 * threshold of the queue, advance the receive tail register of the queue.
2467 * Update that register with the value of the last processed RX
2468 * descriptor minus 1.
2469 */
2470 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2471 if (nb_hold > rxq->rx_free_thresh) {
2472 rx_id = (uint16_t)(rx_id == 0 ?
2473 (rxq->nb_rx_desc - 1) : (rx_id - 1));
2474 /* write TAIL register */
2475 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2476 nb_hold = 0;
2477 }
2478 rxq->nb_rx_hold = nb_hold;
2479
2480 /* return the number of received packets in the burst */
2481 return nb_rx;
2482 }
2483
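/* Translate tunnel-related offload flags and header lengths into the
 * context descriptor tunneling field (EIPT, EIPLEN, L4TUNT, L4TUNLEN).
 */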
2484 static inline void
2485 ice_parse_tunneling_params(uint64_t ol_flags,
2486 union ice_tx_offload tx_offload,
2487 uint32_t *cd_tunneling)
2488 {
2489 /* EIPT: External (outer) IP header type */
2490 if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
2491 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2492 else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
2493 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2494 else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
2495 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2496
2497 /* EIPLEN: External (outer) IP header length, in DWords */
2498 *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2499 ICE_TXD_CTX_QW0_EIPLEN_S;
2500
2501 /* L4TUNT: L4 Tunneling Type */
2502 switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
2503 case RTE_MBUF_F_TX_TUNNEL_IPIP:
2504 /* for non UDP / GRE tunneling, set to 00b */
2505 break;
2506 case RTE_MBUF_F_TX_TUNNEL_VXLAN:
2507 case RTE_MBUF_F_TX_TUNNEL_GTP:
2508 case RTE_MBUF_F_TX_TUNNEL_GENEVE:
2509 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2510 break;
2511 case RTE_MBUF_F_TX_TUNNEL_GRE:
2512 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2513 break;
2514 default:
2515 PMD_TX_LOG(ERR, "Tunnel type not supported");
2516 return;
2517 }
2518
2519 /* L4TUNLEN: L4 Tunneling Length, in Words
2520 *
2521 * We depend on app to set rte_mbuf.l2_len correctly.
2522 * For IP in GRE it should be set to the length of the GRE
2523 * header;
2524 * For MAC in GRE or MAC in UDP it should be set to the length
2525 * of the GRE or UDP headers plus the inner MAC up to including
2526 * its last Ethertype.
2527 * If MPLS labels exist, it should include them as well.
2528 */
2529 *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2530 ICE_TXD_CTX_QW0_NATLEN_S;
2531
2532 /**
2533 * Calculate the tunneling UDP checksum.
2534 * Shall be set only if L4TUNT = 01b and EIPT is not zero
2535 */
2536 if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
2537 (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2538 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2539 }
2540
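/* Fill the data descriptor command and offset fields for MACLEN and
 * L3/L4 checksum offloads based on the packet's offload flags.
 */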
2541 static inline void
2542 ice_txd_enable_checksum(uint64_t ol_flags,
2543 uint32_t *td_cmd,
2544 uint32_t *td_offset,
2545 union ice_tx_offload tx_offload)
2546 {
2547 /* Set MACLEN */
2548 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2549 *td_offset |= (tx_offload.outer_l2_len >> 1)
2550 << ICE_TX_DESC_LEN_MACLEN_S;
2551 else
2552 *td_offset |= (tx_offload.l2_len >> 1)
2553 << ICE_TX_DESC_LEN_MACLEN_S;
2554
2555 /* Enable L3 checksum offloads */
2556 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
2557 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2558 *td_offset |= (tx_offload.l3_len >> 2) <<
2559 ICE_TX_DESC_LEN_IPLEN_S;
2560 } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
2561 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2562 *td_offset |= (tx_offload.l3_len >> 2) <<
2563 ICE_TX_DESC_LEN_IPLEN_S;
2564 } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
2565 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2566 *td_offset |= (tx_offload.l3_len >> 2) <<
2567 ICE_TX_DESC_LEN_IPLEN_S;
2568 }
2569
2570 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2571 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2572 *td_offset |= (tx_offload.l4_len >> 2) <<
2573 ICE_TX_DESC_LEN_L4_LEN_S;
2574 return;
2575 }
2576
2577 /* Enable L4 checksum offloads */
2578 switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
2579 case RTE_MBUF_F_TX_TCP_CKSUM:
2580 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2581 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2582 ICE_TX_DESC_LEN_L4_LEN_S;
2583 break;
2584 case RTE_MBUF_F_TX_SCTP_CKSUM:
2585 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2586 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2587 ICE_TX_DESC_LEN_L4_LEN_S;
2588 break;
2589 case RTE_MBUF_F_TX_UDP_CKSUM:
2590 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2591 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2592 ICE_TX_DESC_LEN_L4_LEN_S;
2593 break;
2594 default:
2595 break;
2596 }
2597 }
2598
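/* Reclaim transmitted descriptors up to the next RS threshold boundary if
 * hardware reports the threshold descriptor done; returns -1 when nothing
 * could be cleaned.
 */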
2599 static inline int
2600 ice_xmit_cleanup(struct ice_tx_queue *txq)
2601 {
2602 struct ice_tx_entry *sw_ring = txq->sw_ring;
2603 volatile struct ice_tx_desc *txd = txq->tx_ring;
2604 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2605 uint16_t nb_tx_desc = txq->nb_tx_desc;
2606 uint16_t desc_to_clean_to;
2607 uint16_t nb_tx_to_clean;
2608
2609 /* Determine the last descriptor needing to be cleaned */
2610 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2611 if (desc_to_clean_to >= nb_tx_desc)
2612 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2613
2614 /* Check to make sure the last descriptor to clean is done */
2615 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2616 if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2617 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2618 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2619 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2620 desc_to_clean_to,
2621 txq->port_id, txq->queue_id,
2622 txd[desc_to_clean_to].cmd_type_offset_bsz);
2623 /* Failed to clean any descriptors */
2624 return -1;
2625 }
2626
2627 /* Figure out how many descriptors will be cleaned */
2628 if (last_desc_cleaned > desc_to_clean_to)
2629 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2630 desc_to_clean_to);
2631 else
2632 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2633 last_desc_cleaned);
2634
2635 /* The last descriptor to clean is done, so that means all the
2636 * descriptors from the last descriptor that was cleaned
2637 * up to the last descriptor with the RS bit set
2638 * are done. Only reset the threshold descriptor.
2639 */
2640 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2641
2642 /* Update the txq to reflect the last descriptor that was cleaned */
2643 txq->last_desc_cleaned = desc_to_clean_to;
2644 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2645
2646 return 0;
2647 }
2648
2649 /* Construct the tx flags */
2650 static inline uint64_t
2651 ice_build_ctob(uint32_t td_cmd,
2652 uint32_t td_offset,
2653 uint16_t size,
2654 uint32_t td_tag)
2655 {
2656 return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2657 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2658 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2659 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2660 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2661 }
2662
2663 /* Check if the context descriptor is needed for TX offloading */
2664 static inline uint16_t
2665 ice_calc_context_desc(uint64_t flags)
2666 {
2667 static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG |
2668 RTE_MBUF_F_TX_QINQ |
2669 RTE_MBUF_F_TX_OUTER_IP_CKSUM |
2670 RTE_MBUF_F_TX_TUNNEL_MASK |
2671 RTE_MBUF_F_TX_IEEE1588_TMST;
2672
2673 return (flags & mask) ? 1 : 0;
2674 }
2675
2676 /* set ice TSO context descriptor */
2677 static inline uint64_t
2678 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2679 {
2680 uint64_t ctx_desc = 0;
2681 uint32_t cd_cmd, hdr_len, cd_tso_len;
2682
2683 if (!tx_offload.l4_len) {
2684 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2685 return ctx_desc;
2686 }
2687
2688 hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2689 hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
2690 tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2691
2692 cd_cmd = ICE_TX_CTX_DESC_TSO;
2693 cd_tso_len = mbuf->pkt_len - hdr_len;
2694 ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2695 ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2696 ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
2697
2698 return ctx_desc;
2699 }
2700
2701 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2702 #define ICE_MAX_DATA_PER_TXD \
2703 (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2704 /* Calculate the number of TX descriptors needed for each pkt */
2705 static inline uint16_t
2706 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2707 {
2708 struct rte_mbuf *txd = tx_pkt;
2709 uint16_t count = 0;
2710
2711 while (txd != NULL) {
2712 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2713 txd = txd->next;
2714 }
2715
2716 return count;
2717 }
2718
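/* Full-featured scalar Tx path: builds context descriptors for TSO,
 * tunneling, QinQ and timestamping, enables checksum offloads and
 * handles multi-segment packets, splitting buffers that exceed the
 * per-descriptor size limit.
 */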
2719 uint16_t
2720 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2721 {
2722 struct ice_tx_queue *txq;
2723 volatile struct ice_tx_desc *tx_ring;
2724 volatile struct ice_tx_desc *txd;
2725 struct ice_tx_entry *sw_ring;
2726 struct ice_tx_entry *txe, *txn;
2727 struct rte_mbuf *tx_pkt;
2728 struct rte_mbuf *m_seg;
2729 uint32_t cd_tunneling_params;
2730 uint16_t tx_id;
2731 uint16_t nb_tx;
2732 uint16_t nb_used;
2733 uint16_t nb_ctx;
2734 uint32_t td_cmd = 0;
2735 uint32_t td_offset = 0;
2736 uint32_t td_tag = 0;
2737 uint16_t tx_last;
2738 uint16_t slen;
2739 uint64_t buf_dma_addr;
2740 uint64_t ol_flags;
2741 union ice_tx_offload tx_offload = {0};
2742
2743 txq = tx_queue;
2744 sw_ring = txq->sw_ring;
2745 tx_ring = txq->tx_ring;
2746 tx_id = txq->tx_tail;
2747 txe = &sw_ring[tx_id];
2748
2749 /* Check if the descriptor ring needs to be cleaned. */
2750 if (txq->nb_tx_free < txq->tx_free_thresh)
2751 (void)ice_xmit_cleanup(txq);
2752
2753 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2754 tx_pkt = *tx_pkts++;
2755
2756 td_cmd = 0;
2757 td_tag = 0;
2758 td_offset = 0;
2759 ol_flags = tx_pkt->ol_flags;
2760 tx_offload.l2_len = tx_pkt->l2_len;
2761 tx_offload.l3_len = tx_pkt->l3_len;
2762 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2763 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2764 tx_offload.l4_len = tx_pkt->l4_len;
2765 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2766 /* Calculate the number of context descriptors needed. */
2767 nb_ctx = ice_calc_context_desc(ol_flags);
2768
2769 /* The number of descriptors that must be allocated for
2770 * a packet equals the number of segments of that packet
2771 * plus the number of context descriptors, if needed.
2772 * Recalculate the needed Tx descriptors when TSO is enabled,
2773 * in case the mbuf data size exceeds the maximum data size
2774 * that the hardware allows per Tx descriptor.
2775 */
2776 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
2777 nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2778 nb_ctx);
2779 else
2780 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2781 tx_last = (uint16_t)(tx_id + nb_used - 1);
2782
2783 /* Circular ring */
2784 if (tx_last >= txq->nb_tx_desc)
2785 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2786
2787 if (nb_used > txq->nb_tx_free) {
2788 if (ice_xmit_cleanup(txq) != 0) {
2789 if (nb_tx == 0)
2790 return 0;
2791 goto end_of_tx;
2792 }
2793 if (unlikely(nb_used > txq->tx_rs_thresh)) {
2794 while (nb_used > txq->nb_tx_free) {
2795 if (ice_xmit_cleanup(txq) != 0) {
2796 if (nb_tx == 0)
2797 return 0;
2798 goto end_of_tx;
2799 }
2800 }
2801 }
2802 }
2803
2804 /* Descriptor based VLAN insertion */
2805 if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
2806 td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2807 td_tag = tx_pkt->vlan_tci;
2808 }
2809
2810 /* Fill in tunneling parameters if necessary */
2811 cd_tunneling_params = 0;
2812 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
2813 ice_parse_tunneling_params(ol_flags, tx_offload,
2814 &cd_tunneling_params);
2815
2816 /* Enable checksum offloading */
2817 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2818 ice_txd_enable_checksum(ol_flags, &td_cmd,
2819 &td_offset, tx_offload);
2820
2821 if (nb_ctx) {
2822 /* Setup TX context descriptor if required */
2823 volatile struct ice_tx_ctx_desc *ctx_txd =
2824 (volatile struct ice_tx_ctx_desc *)
2825 &tx_ring[tx_id];
2826 uint16_t cd_l2tag2 = 0;
2827 uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2828
2829 txn = &sw_ring[txe->next_id];
2830 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2831 if (txe->mbuf) {
2832 rte_pktmbuf_free_seg(txe->mbuf);
2833 txe->mbuf = NULL;
2834 }
2835
2836 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
2837 cd_type_cmd_tso_mss |=
2838 ice_set_tso_ctx(tx_pkt, tx_offload);
2839 else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
2840 cd_type_cmd_tso_mss |=
2841 ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
2842 ICE_TXD_CTX_QW1_CMD_S);
2843
2844 ctx_txd->tunneling_params =
2845 rte_cpu_to_le_32(cd_tunneling_params);
2846
2847 /* TX context descriptor based double VLAN insert */
2848 if (ol_flags & RTE_MBUF_F_TX_QINQ) {
2849 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2850 cd_type_cmd_tso_mss |=
2851 ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2852 ICE_TXD_CTX_QW1_CMD_S);
2853 }
2854 ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2855 ctx_txd->qw1 =
2856 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2857
2858 txe->last_id = tx_last;
2859 tx_id = txe->next_id;
2860 txe = txn;
2861 }
2862 m_seg = tx_pkt;
2863
2864 do {
2865 txd = &tx_ring[tx_id];
2866 txn = &sw_ring[txe->next_id];
2867
2868 if (txe->mbuf)
2869 rte_pktmbuf_free_seg(txe->mbuf);
2870 txe->mbuf = m_seg;
2871
2872 /* Setup TX Descriptor */
2873 slen = m_seg->data_len;
2874 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2875
2876 while ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
2877 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2878 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2879 txd->cmd_type_offset_bsz =
2880 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2881 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2882 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2883 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2884 ICE_TXD_QW1_TX_BUF_SZ_S) |
2885 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2886
2887 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2888 slen -= ICE_MAX_DATA_PER_TXD;
2889
2890 txe->last_id = tx_last;
2891 tx_id = txe->next_id;
2892 txe = txn;
2893 txd = &tx_ring[tx_id];
2894 txn = &sw_ring[txe->next_id];
2895 }
2896
2897 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2898 txd->cmd_type_offset_bsz =
2899 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2900 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2901 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2902 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2903 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2904
2905 txe->last_id = tx_last;
2906 tx_id = txe->next_id;
2907 txe = txn;
2908 m_seg = m_seg->next;
2909 } while (m_seg);
2910
2911 /* fill the last descriptor with End of Packet (EOP) bit */
2912 td_cmd |= ICE_TX_DESC_CMD_EOP;
2913 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2914 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2915
2916 /* set RS bit on the last descriptor of one packet */
2917 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2918 PMD_TX_LOG(DEBUG,
2919 "Setting RS bit on TXD id="
2920 "%4u (port=%d queue=%d)",
2921 tx_last, txq->port_id, txq->queue_id);
2922
2923 td_cmd |= ICE_TX_DESC_CMD_RS;
2924
2925 /* Update txq RS bit counters */
2926 txq->nb_tx_used = 0;
2927 }
2928 txd->cmd_type_offset_bsz |=
2929 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2930 ICE_TXD_QW1_CMD_S);
2931 }
2932 end_of_tx:
2933 /* update Tail register */
2934 ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2935 txq->tx_tail = tx_id;
2936
2937 return nb_tx;
2938 }
2939
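/* Free the mbufs of one tx_rs_thresh-sized batch once hardware marks the
 * tracked threshold descriptor as done (simple Tx path).
 */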
2940 static __rte_always_inline int
2941 ice_tx_free_bufs(struct ice_tx_queue *txq)
2942 {
2943 struct ice_tx_entry *txep;
2944 uint16_t i;
2945
2946 if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2947 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2948 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2949 return 0;
2950
2951 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2952
2953 for (i = 0; i < txq->tx_rs_thresh; i++)
2954 rte_prefetch0((txep + i)->mbuf);
2955
2956 if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
2957 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2958 rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2959 txep->mbuf = NULL;
2960 }
2961 } else {
2962 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2963 rte_pktmbuf_free_seg(txep->mbuf);
2964 txep->mbuf = NULL;
2965 }
2966 }
2967
2968 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2969 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2970 if (txq->tx_next_dd >= txq->nb_tx_desc)
2971 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2972
2973 return txq->tx_rs_thresh;
2974 }
2975
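/* Free transmitted mbufs on the full Tx path until free_cnt packets have
 * been released or no more completed descriptors are available.
 */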
2976 static int
2977 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2978 uint32_t free_cnt)
2979 {
2980 struct ice_tx_entry *swr_ring = txq->sw_ring;
2981 uint16_t i, tx_last, tx_id;
2982 uint16_t nb_tx_free_last;
2983 uint16_t nb_tx_to_clean;
2984 uint32_t pkt_cnt;
2985
2986 /* Start freeing mbufs from the entry after tx_tail */
2987 tx_last = txq->tx_tail;
2988 tx_id = swr_ring[tx_last].next_id;
2989
2990 if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2991 return 0;
2992
2993 nb_tx_to_clean = txq->nb_tx_free;
2994 nb_tx_free_last = txq->nb_tx_free;
2995 if (!free_cnt)
2996 free_cnt = txq->nb_tx_desc;
2997
2998 /* Loop through swr_ring to count the number of
2999 * freeable mbufs and packets.
3000 */
3001 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
3002 for (i = 0; i < nb_tx_to_clean &&
3003 pkt_cnt < free_cnt &&
3004 tx_id != tx_last; i++) {
3005 if (swr_ring[tx_id].mbuf != NULL) {
3006 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
3007 swr_ring[tx_id].mbuf = NULL;
3008
3009 /*
3010 * last segment in the packet,
3011 * increment packet count
3012 */
3013 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
3014 }
3015
3016 tx_id = swr_ring[tx_id].next_id;
3017 }
3018
3019 if (txq->tx_rs_thresh > txq->nb_tx_desc -
3020 txq->nb_tx_free || tx_id == tx_last)
3021 break;
3022
3023 if (pkt_cnt < free_cnt) {
3024 if (ice_xmit_cleanup(txq))
3025 break;
3026
3027 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
3028 nb_tx_free_last = txq->nb_tx_free;
3029 }
3030 }
3031
3032 return (int)pkt_cnt;
3033 }
3034
3035 #ifdef RTE_ARCH_X86
3036 static int
3037 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
3038 uint32_t free_cnt __rte_unused)
3039 {
3040 return -ENOTSUP;
3041 }
3042 #endif
3043
3044 static int
3045 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
3046 uint32_t free_cnt)
3047 {
3048 int i, n, cnt;
3049
3050 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
3051 free_cnt = txq->nb_tx_desc;
3052
3053 cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
3054
3055 for (i = 0; i < cnt; i += n) {
3056 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
3057 break;
3058
3059 n = ice_tx_free_bufs(txq);
3060
3061 if (n == 0)
3062 break;
3063 }
3064
3065 return i;
3066 }
3067
3068 int
3069 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
3070 {
3071 struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
3072 struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
3073 struct ice_adapter *ad =
3074 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3075
3076 #ifdef RTE_ARCH_X86
3077 if (ad->tx_vec_allowed)
3078 return ice_tx_done_cleanup_vec(q, free_cnt);
3079 #endif
3080 if (ad->tx_simple_allowed)
3081 return ice_tx_done_cleanup_simple(q, free_cnt);
3082 else
3083 return ice_tx_done_cleanup_full(q, free_cnt);
3084 }
3085
3086 /* Populate 4 descriptors with data from 4 mbufs */
3087 static inline void
3088 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3089 {
3090 uint64_t dma_addr;
3091 uint32_t i;
3092
3093 for (i = 0; i < 4; i++, txdp++, pkts++) {
3094 dma_addr = rte_mbuf_data_iova(*pkts);
3095 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3096 txdp->cmd_type_offset_bsz =
3097 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3098 (*pkts)->data_len, 0);
3099 }
3100 }
3101
3102 /* Populate 1 descriptor with data from 1 mbuf */
3103 static inline void
3104 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3105 {
3106 uint64_t dma_addr;
3107
3108 dma_addr = rte_mbuf_data_iova(*pkts);
3109 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3110 txdp->cmd_type_offset_bsz =
3111 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3112 (*pkts)->data_len, 0);
3113 }
3114
3115 static inline void
3116 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
3117 uint16_t nb_pkts)
3118 {
3119 volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
3120 struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
3121 const int N_PER_LOOP = 4;
3122 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
3123 int mainpart, leftover;
3124 int i, j;
3125
3126 /**
3127 * Process most of the packets in chunks of N pkts. Any
3128 * leftover packets will get processed one at a time.
3129 */
3130 mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
3131 leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
3132 for (i = 0; i < mainpart; i += N_PER_LOOP) {
3133 /* Copy N mbuf pointers to the S/W ring */
3134 for (j = 0; j < N_PER_LOOP; ++j)
3135 (txep + i + j)->mbuf = *(pkts + i + j);
3136 tx4(txdp + i, pkts + i);
3137 }
3138
3139 if (unlikely(leftover > 0)) {
3140 for (i = 0; i < leftover; ++i) {
3141 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
3142 tx1(txdp + mainpart + i, pkts + mainpart + i);
3143 }
3144 }
3145 }
3146
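/* Simple Tx worker: no offloads and one descriptor per packet; frees
 * completed buffers when below tx_free_thresh, fills the ring (wrapping
 * if necessary) and sets RS bits at tx_rs_thresh boundaries.
 */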
3147 static inline uint16_t
3148 tx_xmit_pkts(struct ice_tx_queue *txq,
3149 struct rte_mbuf **tx_pkts,
3150 uint16_t nb_pkts)
3151 {
3152 volatile struct ice_tx_desc *txr = txq->tx_ring;
3153 uint16_t n = 0;
3154
3155 /**
3156 * Begin scanning the H/W ring for done descriptors when the number
3157 * of available descriptors drops below tx_free_thresh. For each done
3158 * descriptor, free the associated buffer.
3159 */
3160 if (txq->nb_tx_free < txq->tx_free_thresh)
3161 ice_tx_free_bufs(txq);
3162
3163 /* Use available descriptor only */
3164 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
3165 if (unlikely(!nb_pkts))
3166 return 0;
3167
3168 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
3169 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
3170 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
3171 ice_tx_fill_hw_ring(txq, tx_pkts, n);
3172 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3173 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3174 ICE_TXD_QW1_CMD_S);
3175 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3176 txq->tx_tail = 0;
3177 }
3178
3179 /* Fill hardware descriptor ring with mbuf data */
3180 ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
3181 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
3182
3183 /* Determine if RS bit needs to be set */
3184 if (txq->tx_tail > txq->tx_next_rs) {
3185 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3186 rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3187 ICE_TXD_QW1_CMD_S);
3188 txq->tx_next_rs =
3189 (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
3190 if (txq->tx_next_rs >= txq->nb_tx_desc)
3191 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3192 }
3193
3194 if (txq->tx_tail >= txq->nb_tx_desc)
3195 txq->tx_tail = 0;
3196
3197 /* Update the tx tail register */
3198 ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
3199
3200 return nb_pkts;
3201 }
3202
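/* Simple Tx entry point: splits bursts larger than ICE_TX_MAX_BURST into
 * chunks handled by tx_xmit_pkts().
 */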
3203 static uint16_t
3204 ice_xmit_pkts_simple(void *tx_queue,
3205 struct rte_mbuf **tx_pkts,
3206 uint16_t nb_pkts)
3207 {
3208 uint16_t nb_tx = 0;
3209
3210 if (likely(nb_pkts <= ICE_TX_MAX_BURST))
3211 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3212 tx_pkts, nb_pkts);
3213
3214 while (nb_pkts) {
3215 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3216 ICE_TX_MAX_BURST);
3217
3218 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3219 &tx_pkts[nb_tx], num);
3220 nb_tx = (uint16_t)(nb_tx + ret);
3221 nb_pkts = (uint16_t)(nb_pkts - ret);
3222 if (ret < num)
3223 break;
3224 }
3225
3226 return nb_tx;
3227 }
3228
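/* Select the Rx burst function (scalar, bulk-alloc, scattered or vector
 * SSE/AVX2/AVX512 variants) based on queue configuration, offload
 * requirements and the SIMD support detected at runtime.
 */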
3229 void __rte_cold
3230 ice_set_rx_function(struct rte_eth_dev *dev)
3231 {
3232 PMD_INIT_FUNC_TRACE();
3233 struct ice_adapter *ad =
3234 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3235 #ifdef RTE_ARCH_X86
3236 struct ice_rx_queue *rxq;
3237 int i;
3238 int rx_check_ret = -1;
3239
3240 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3241 ad->rx_use_avx512 = false;
3242 ad->rx_use_avx2 = false;
3243 rx_check_ret = ice_rx_vec_dev_check(dev);
3244 if (ad->ptp_ena)
3245 rx_check_ret = -1;
3246 ad->rx_vec_offload_support =
3247 (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH);
3248 if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
3249 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3250 ad->rx_vec_allowed = true;
3251 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3252 rxq = dev->data->rx_queues[i];
3253 if (rxq && ice_rxq_vec_setup(rxq)) {
3254 ad->rx_vec_allowed = false;
3255 break;
3256 }
3257 }
3258
3259 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3260 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3261 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3262 #ifdef CC_AVX512_SUPPORT
3263 ad->rx_use_avx512 = true;
3264 #else
3265 PMD_DRV_LOG(NOTICE,
3266 "AVX512 is not supported in build env");
3267 #endif
3268 if (!ad->rx_use_avx512 &&
3269 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3270 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3271 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3272 ad->rx_use_avx2 = true;
3273
3274 } else {
3275 ad->rx_vec_allowed = false;
3276 }
3277 }
3278
3279 if (ad->rx_vec_allowed) {
3280 if (dev->data->scattered_rx) {
3281 if (ad->rx_use_avx512) {
3282 #ifdef CC_AVX512_SUPPORT
3283 if (ad->rx_vec_offload_support) {
3284 PMD_DRV_LOG(NOTICE,
3285 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
3286 dev->data->port_id);
3287 dev->rx_pkt_burst =
3288 ice_recv_scattered_pkts_vec_avx512_offload;
3289 } else {
3290 PMD_DRV_LOG(NOTICE,
3291 "Using AVX512 Vector Scattered Rx (port %d).",
3292 dev->data->port_id);
3293 dev->rx_pkt_burst =
3294 ice_recv_scattered_pkts_vec_avx512;
3295 }
3296 #endif
3297 } else if (ad->rx_use_avx2) {
3298 if (ad->rx_vec_offload_support) {
3299 PMD_DRV_LOG(NOTICE,
3300 "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
3301 dev->data->port_id);
3302 dev->rx_pkt_burst =
3303 ice_recv_scattered_pkts_vec_avx2_offload;
3304 } else {
3305 PMD_DRV_LOG(NOTICE,
3306 "Using AVX2 Vector Scattered Rx (port %d).",
3307 dev->data->port_id);
3308 dev->rx_pkt_burst =
3309 ice_recv_scattered_pkts_vec_avx2;
3310 }
3311 } else {
3312 PMD_DRV_LOG(DEBUG,
3313 "Using Vector Scattered Rx (port %d).",
3314 dev->data->port_id);
3315 dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
3316 }
3317 } else {
3318 if (ad->rx_use_avx512) {
3319 #ifdef CC_AVX512_SUPPORT
3320 if (ad->rx_vec_offload_support) {
3321 PMD_DRV_LOG(NOTICE,
3322 "Using AVX512 OFFLOAD Vector Rx (port %d).",
3323 dev->data->port_id);
3324 dev->rx_pkt_burst =
3325 ice_recv_pkts_vec_avx512_offload;
3326 } else {
3327 PMD_DRV_LOG(NOTICE,
3328 "Using AVX512 Vector Rx (port %d).",
3329 dev->data->port_id);
3330 dev->rx_pkt_burst =
3331 ice_recv_pkts_vec_avx512;
3332 }
3333 #endif
3334 } else if (ad->rx_use_avx2) {
3335 if (ad->rx_vec_offload_support) {
3336 PMD_DRV_LOG(NOTICE,
3337 "Using AVX2 OFFLOAD Vector Rx (port %d).",
3338 dev->data->port_id);
3339 dev->rx_pkt_burst =
3340 ice_recv_pkts_vec_avx2_offload;
3341 } else {
3342 PMD_DRV_LOG(NOTICE,
3343 "Using AVX2 Vector Rx (port %d).",
3344 dev->data->port_id);
3345 dev->rx_pkt_burst =
3346 ice_recv_pkts_vec_avx2;
3347 }
3348 } else {
3349 PMD_DRV_LOG(DEBUG,
3350 "Using Vector Rx (port %d).",
3351 dev->data->port_id);
3352 dev->rx_pkt_burst = ice_recv_pkts_vec;
3353 }
3354 }
3355 return;
3356 }
3357
3358 #endif
3359
3360 if (dev->data->scattered_rx) {
3361 /* Set the non-LRO scattered function */
3362 PMD_INIT_LOG(DEBUG,
3363 "Using a Scattered function on port %d.",
3364 dev->data->port_id);
3365 dev->rx_pkt_burst = ice_recv_scattered_pkts;
3366 } else if (ad->rx_bulk_alloc_allowed) {
3367 PMD_INIT_LOG(DEBUG,
3368 "Rx Burst Bulk Alloc Preconditions are "
3369 "satisfied. Rx Burst Bulk Alloc function "
3370 "will be used on port %d.",
3371 dev->data->port_id);
3372 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3373 } else {
3374 PMD_INIT_LOG(DEBUG,
3375 "Rx Burst Bulk Alloc Preconditions are not "
3376 "satisfied, Normal Rx will be used on port %d.",
3377 dev->data->port_id);
3378 dev->rx_pkt_burst = ice_recv_pkts;
3379 }
3380 }
3381
3382 static const struct {
3383 eth_rx_burst_t pkt_burst;
3384 const char *info;
3385 } ice_rx_burst_infos[] = {
3386 { ice_recv_scattered_pkts, "Scalar Scattered" },
3387 { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
3388 { ice_recv_pkts, "Scalar" },
3389 #ifdef RTE_ARCH_X86
3390 #ifdef CC_AVX512_SUPPORT
3391 { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3392 { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
3393 { ice_recv_pkts_vec_avx512, "Vector AVX512" },
3394 { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3395 #endif
3396 { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3397 { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
3398 { ice_recv_pkts_vec_avx2, "Vector AVX2" },
3399 { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" },
3400 { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
3401 { ice_recv_pkts_vec, "Vector SSE" },
3402 #endif
3403 };
3404
3405 int
3406 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3407 struct rte_eth_burst_mode *mode)
3408 {
3409 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3410 int ret = -EINVAL;
3411 unsigned int i;
3412
3413 for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3414 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3415 snprintf(mode->info, sizeof(mode->info), "%s",
3416 ice_rx_burst_infos[i].info);
3417 ret = 0;
3418 break;
3419 }
3420 }
3421
3422 return ret;
3423 }
3424
3425 void __rte_cold
3426 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3427 {
3428 struct ice_adapter *ad =
3429 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3430
3431 /* Use a simple Tx queue if possible (only fast free is allowed) */
3432 ad->tx_simple_allowed =
3433 (txq->offloads ==
3434 (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
3435 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3436
3437 if (ad->tx_simple_allowed)
3438 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3439 txq->queue_id);
3440 else
3441 PMD_INIT_LOG(DEBUG,
3442 "Simple Tx can NOT be enabled on Tx queue %u.",
3443 txq->queue_id);
3444 }
3445
3446 /*********************************************************************
3447 *
3448 * TX prep functions
3449 *
3450 **********************************************************************/
3451 /* The default values of TSO MSS */
3452 #define ICE_MIN_TSO_MSS 64
3453 #define ICE_MAX_TSO_MSS 9728
3454 #define ICE_MAX_TSO_FRAME_SIZE 262144
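/* Validate offload requests (TSO MSS range, frame size, checksum fields)
 * before transmission. This callback is installed as dev->tx_pkt_prepare,
 * so applications reach it through rte_eth_tx_prepare(). Illustrative
 * call sequence, assuming port_id/queue_id/pkts are set up elsewhere:
 *
 *   uint16_t nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *   uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);
 */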
3455 uint16_t
3456 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3457 uint16_t nb_pkts)
3458 {
3459 int i, ret;
3460 uint64_t ol_flags;
3461 struct rte_mbuf *m;
3462
3463 for (i = 0; i < nb_pkts; i++) {
3464 m = tx_pkts[i];
3465 ol_flags = m->ol_flags;
3466
3467 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
3468 (m->tso_segsz < ICE_MIN_TSO_MSS ||
3469 m->tso_segsz > ICE_MAX_TSO_MSS ||
3470 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3471 /**
3472 * An MSS outside this range is considered malicious.
3473 */
3474 rte_errno = EINVAL;
3475 return i;
3476 }
3477
3478 #ifdef RTE_ETHDEV_DEBUG_TX
3479 ret = rte_validate_tx_offload(m);
3480 if (ret != 0) {
3481 rte_errno = -ret;
3482 return i;
3483 }
3484 #endif
3485 ret = rte_net_intel_cksum_prepare(m);
3486 if (ret != 0) {
3487 rte_errno = -ret;
3488 return i;
3489 }
3490 }
3491 return i;
3492 }
3493
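/* Select the Tx burst and prepare functions (scalar, simple or vector
 * SSE/AVX2/AVX512 variants) based on queue offloads and the SIMD support
 * detected at runtime.
 */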
3494 void __rte_cold
3495 ice_set_tx_function(struct rte_eth_dev *dev)
3496 {
3497 struct ice_adapter *ad =
3498 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3499 #ifdef RTE_ARCH_X86
3500 struct ice_tx_queue *txq;
3501 int i;
3502 int tx_check_ret = -1;
3503
3504 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3505 ad->tx_use_avx2 = false;
3506 ad->tx_use_avx512 = false;
3507 tx_check_ret = ice_tx_vec_dev_check(dev);
3508 if (tx_check_ret >= 0 &&
3509 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3510 ad->tx_vec_allowed = true;
3511
3512 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3513 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3514 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3515 #ifdef CC_AVX512_SUPPORT
3516 ad->tx_use_avx512 = true;
3517 #else
3518 PMD_DRV_LOG(NOTICE,
3519 "AVX512 is not supported in build env");
3520 #endif
3521 if (!ad->tx_use_avx512 &&
3522 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3523 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3524 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3525 ad->tx_use_avx2 = true;
3526
3527 if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
3528 tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
3529 ad->tx_vec_allowed = false;
3530
3531 if (ad->tx_vec_allowed) {
3532 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3533 txq = dev->data->tx_queues[i];
3534 if (txq && ice_txq_vec_setup(txq)) {
3535 ad->tx_vec_allowed = false;
3536 break;
3537 }
3538 }
3539 }
3540 } else {
3541 ad->tx_vec_allowed = false;
3542 }
3543 }
3544
3545 if (ad->tx_vec_allowed) {
3546 dev->tx_pkt_prepare = NULL;
3547 if (ad->tx_use_avx512) {
3548 #ifdef CC_AVX512_SUPPORT
3549 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3550 PMD_DRV_LOG(NOTICE,
3551 "Using AVX512 OFFLOAD Vector Tx (port %d).",
3552 dev->data->port_id);
3553 dev->tx_pkt_burst =
3554 ice_xmit_pkts_vec_avx512_offload;
3555 dev->tx_pkt_prepare = ice_prep_pkts;
3556 } else {
3557 PMD_DRV_LOG(NOTICE,
3558 "Using AVX512 Vector Tx (port %d).",
3559 dev->data->port_id);
3560 dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3561 }
3562 #endif
3563 } else {
3564 if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3565 PMD_DRV_LOG(NOTICE,
3566 "Using AVX2 OFFLOAD Vector Tx (port %d).",
3567 dev->data->port_id);
3568 dev->tx_pkt_burst =
3569 ice_xmit_pkts_vec_avx2_offload;
3570 dev->tx_pkt_prepare = ice_prep_pkts;
3571 } else {
3572 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3573 ad->tx_use_avx2 ? "avx2 " : "",
3574 dev->data->port_id);
3575 dev->tx_pkt_burst = ad->tx_use_avx2 ?
3576 ice_xmit_pkts_vec_avx2 :
3577 ice_xmit_pkts_vec;
3578 }
3579 }
3580
3581 return;
3582 }
3583 #endif
3584
3585 if (ad->tx_simple_allowed) {
3586 PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
3587 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3588 dev->tx_pkt_prepare = NULL;
3589 } else {
3590 PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3591 dev->tx_pkt_burst = ice_xmit_pkts;
3592 dev->tx_pkt_prepare = ice_prep_pkts;
3593 }
3594 }
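/*
 * Illustrative sketch, not part of the upstream driver: the selection
 * above is gated by the EAL SIMD bitwidth limit (see the EAL option
 * --force-max-simd-bitwidth) and the detected CPU flags. A standalone
 * check for the AVX2 case could look like this; the function name is
 * hypothetical.
 */
#ifdef RTE_ARCH_X86
static __rte_unused int
example_avx2_tx_possible(void)
{
	return rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
	       rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1;
}
#endif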
3595
3596 static const struct {
3597 eth_tx_burst_t pkt_burst;
3598 const char *info;
3599 } ice_tx_burst_infos[] = {
3600 { ice_xmit_pkts_simple, "Scalar Simple" },
3601 { ice_xmit_pkts, "Scalar" },
3602 #ifdef RTE_ARCH_X86
3603 #ifdef CC_AVX512_SUPPORT
3604 { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
3605 { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3606 #endif
3607 { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3608 { ice_xmit_pkts_vec_avx2_offload, "Offload Vector AVX2" },
3609 { ice_xmit_pkts_vec, "Vector SSE" },
3610 #endif
3611 };
3612
3613 int
3614 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3615 struct rte_eth_burst_mode *mode)
3616 {
3617 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3618 int ret = -EINVAL;
3619 unsigned int i;
3620
3621 for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3622 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3623 snprintf(mode->info, sizeof(mode->info), "%s",
3624 ice_tx_burst_infos[i].info);
3625 ret = 0;
3626 break;
3627 }
3628 }
3629
3630 return ret;
3631 }
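/*
 * Illustrative sketch, not part of the upstream driver: the table above
 * backs rte_eth_tx_burst_mode_get(), so an application can report which
 * Tx path a queue ended up with. The function name and port/queue ids
 * are hypothetical.
 */
static __rte_unused void
example_log_tx_burst_mode(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		PMD_DRV_LOG(INFO, "port %u txq %u Tx burst mode: %s",
			    port_id, queue_id, mode.info);
}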
3632
3633 /* Refer to the hardware datasheet for the meaning of each value.
3634 *
3635 * @note: keep ice_dev_supported_ptypes_get() in sync with any change here.
3636 */
3637 static inline uint32_t
3638 ice_get_default_pkt_type(uint16_t ptype)
3639 {
3640 static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3641 __rte_cache_aligned = {
3642 /* L2 types */
3643 /* [0] reserved */
3644 [1] = RTE_PTYPE_L2_ETHER,
3645 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3646 /* [3] - [5] reserved */
3647 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3648 /* [7] - [10] reserved */
3649 [11] = RTE_PTYPE_L2_ETHER_ARP,
3650 /* [12] - [21] reserved */
3651
3652 /* Non tunneled IPv4 */
3653 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3654 RTE_PTYPE_L4_FRAG,
3655 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3656 RTE_PTYPE_L4_NONFRAG,
3657 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3658 RTE_PTYPE_L4_UDP,
3659 /* [25] reserved */
3660 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3661 RTE_PTYPE_L4_TCP,
3662 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3663 RTE_PTYPE_L4_SCTP,
3664 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3665 RTE_PTYPE_L4_ICMP,
3666
3667 /* IPv4 --> IPv4 */
3668 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3669 RTE_PTYPE_TUNNEL_IP |
3670 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3671 RTE_PTYPE_INNER_L4_FRAG,
3672 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3673 RTE_PTYPE_TUNNEL_IP |
3674 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3675 RTE_PTYPE_INNER_L4_NONFRAG,
3676 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3677 RTE_PTYPE_TUNNEL_IP |
3678 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3679 RTE_PTYPE_INNER_L4_UDP,
3680 /* [32] reserved */
3681 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3682 RTE_PTYPE_TUNNEL_IP |
3683 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3684 RTE_PTYPE_INNER_L4_TCP,
3685 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3686 RTE_PTYPE_TUNNEL_IP |
3687 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3688 RTE_PTYPE_INNER_L4_SCTP,
3689 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3690 RTE_PTYPE_TUNNEL_IP |
3691 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3692 RTE_PTYPE_INNER_L4_ICMP,
3693
3694 /* IPv4 --> IPv6 */
3695 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3696 RTE_PTYPE_TUNNEL_IP |
3697 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3698 RTE_PTYPE_INNER_L4_FRAG,
3699 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3700 RTE_PTYPE_TUNNEL_IP |
3701 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3702 RTE_PTYPE_INNER_L4_NONFRAG,
3703 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3704 RTE_PTYPE_TUNNEL_IP |
3705 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3706 RTE_PTYPE_INNER_L4_UDP,
3707 /* [39] reserved */
3708 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3709 RTE_PTYPE_TUNNEL_IP |
3710 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3711 RTE_PTYPE_INNER_L4_TCP,
3712 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3713 RTE_PTYPE_TUNNEL_IP |
3714 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3715 RTE_PTYPE_INNER_L4_SCTP,
3716 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3717 RTE_PTYPE_TUNNEL_IP |
3718 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3719 RTE_PTYPE_INNER_L4_ICMP,
3720
3721 /* IPv4 --> GRE/Teredo/VXLAN */
3722 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3723 RTE_PTYPE_TUNNEL_GRENAT,
3724
3725 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3726 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3727 RTE_PTYPE_TUNNEL_GRENAT |
3728 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3729 RTE_PTYPE_INNER_L4_FRAG,
3730 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3731 RTE_PTYPE_TUNNEL_GRENAT |
3732 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3733 RTE_PTYPE_INNER_L4_NONFRAG,
3734 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3735 RTE_PTYPE_TUNNEL_GRENAT |
3736 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3737 RTE_PTYPE_INNER_L4_UDP,
3738 /* [47] reserved */
3739 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3740 RTE_PTYPE_TUNNEL_GRENAT |
3741 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3742 RTE_PTYPE_INNER_L4_TCP,
3743 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3744 RTE_PTYPE_TUNNEL_GRENAT |
3745 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3746 RTE_PTYPE_INNER_L4_SCTP,
3747 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3748 RTE_PTYPE_TUNNEL_GRENAT |
3749 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3750 RTE_PTYPE_INNER_L4_ICMP,
3751
3752 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3753 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3754 RTE_PTYPE_TUNNEL_GRENAT |
3755 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3756 RTE_PTYPE_INNER_L4_FRAG,
3757 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3758 RTE_PTYPE_TUNNEL_GRENAT |
3759 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3760 RTE_PTYPE_INNER_L4_NONFRAG,
3761 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3762 RTE_PTYPE_TUNNEL_GRENAT |
3763 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3764 RTE_PTYPE_INNER_L4_UDP,
3765 /* [54] reserved */
3766 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3767 RTE_PTYPE_TUNNEL_GRENAT |
3768 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3769 RTE_PTYPE_INNER_L4_TCP,
3770 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3771 RTE_PTYPE_TUNNEL_GRENAT |
3772 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3773 RTE_PTYPE_INNER_L4_SCTP,
3774 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3775 RTE_PTYPE_TUNNEL_GRENAT |
3776 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3777 RTE_PTYPE_INNER_L4_ICMP,
3778
3779 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3780 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3781 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3782
3783 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3784 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3785 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3786 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3787 RTE_PTYPE_INNER_L4_FRAG,
3788 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3789 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3790 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3791 RTE_PTYPE_INNER_L4_NONFRAG,
3792 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3793 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3794 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3795 RTE_PTYPE_INNER_L4_UDP,
3796 /* [62] reserved */
3797 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3798 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3799 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3800 RTE_PTYPE_INNER_L4_TCP,
3801 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3802 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3803 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3804 RTE_PTYPE_INNER_L4_SCTP,
3805 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3806 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3807 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3808 RTE_PTYPE_INNER_L4_ICMP,
3809
3810 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3811 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3812 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3813 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3814 RTE_PTYPE_INNER_L4_FRAG,
3815 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3816 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3817 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3818 RTE_PTYPE_INNER_L4_NONFRAG,
3819 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3820 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3821 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3822 RTE_PTYPE_INNER_L4_UDP,
3823 /* [69] reserved */
3824 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3825 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3826 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3827 RTE_PTYPE_INNER_L4_TCP,
3828 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3829 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3830 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3831 RTE_PTYPE_INNER_L4_SCTP,
3832 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3833 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3834 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3835 RTE_PTYPE_INNER_L4_ICMP,
3836 /* [73] - [87] reserved */
3837
3838 /* Non tunneled IPv6 */
3839 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3840 RTE_PTYPE_L4_FRAG,
3841 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3842 RTE_PTYPE_L4_NONFRAG,
3843 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3844 RTE_PTYPE_L4_UDP,
3845 /* [91] reserved */
3846 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3847 RTE_PTYPE_L4_TCP,
3848 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3849 RTE_PTYPE_L4_SCTP,
3850 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3851 RTE_PTYPE_L4_ICMP,
3852
3853 /* IPv6 --> IPv4 */
3854 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3855 RTE_PTYPE_TUNNEL_IP |
3856 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3857 RTE_PTYPE_INNER_L4_FRAG,
3858 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3859 RTE_PTYPE_TUNNEL_IP |
3860 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3861 RTE_PTYPE_INNER_L4_NONFRAG,
3862 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3863 RTE_PTYPE_TUNNEL_IP |
3864 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3865 RTE_PTYPE_INNER_L4_UDP,
3866 /* [98] reserved */
3867 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3868 RTE_PTYPE_TUNNEL_IP |
3869 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3870 RTE_PTYPE_INNER_L4_TCP,
3871 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3872 RTE_PTYPE_TUNNEL_IP |
3873 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3874 RTE_PTYPE_INNER_L4_SCTP,
3875 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3876 RTE_PTYPE_TUNNEL_IP |
3877 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3878 RTE_PTYPE_INNER_L4_ICMP,
3879
3880 /* IPv6 --> IPv6 */
3881 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3882 RTE_PTYPE_TUNNEL_IP |
3883 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3884 RTE_PTYPE_INNER_L4_FRAG,
3885 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3886 RTE_PTYPE_TUNNEL_IP |
3887 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3888 RTE_PTYPE_INNER_L4_NONFRAG,
3889 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3890 RTE_PTYPE_TUNNEL_IP |
3891 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3892 RTE_PTYPE_INNER_L4_UDP,
3893 /* [105] reserved */
3894 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3895 RTE_PTYPE_TUNNEL_IP |
3896 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3897 RTE_PTYPE_INNER_L4_TCP,
3898 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3899 RTE_PTYPE_TUNNEL_IP |
3900 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3901 RTE_PTYPE_INNER_L4_SCTP,
3902 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3903 RTE_PTYPE_TUNNEL_IP |
3904 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3905 RTE_PTYPE_INNER_L4_ICMP,
3906
3907 /* IPv6 --> GRE/Teredo/VXLAN */
3908 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3909 RTE_PTYPE_TUNNEL_GRENAT,
3910
3911 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3912 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3913 RTE_PTYPE_TUNNEL_GRENAT |
3914 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3915 RTE_PTYPE_INNER_L4_FRAG,
3916 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3917 RTE_PTYPE_TUNNEL_GRENAT |
3918 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3919 RTE_PTYPE_INNER_L4_NONFRAG,
3920 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3921 RTE_PTYPE_TUNNEL_GRENAT |
3922 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3923 RTE_PTYPE_INNER_L4_UDP,
3924 /* [113] reserved */
3925 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3926 RTE_PTYPE_TUNNEL_GRENAT |
3927 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3928 RTE_PTYPE_INNER_L4_TCP,
3929 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3930 RTE_PTYPE_TUNNEL_GRENAT |
3931 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3932 RTE_PTYPE_INNER_L4_SCTP,
3933 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3934 RTE_PTYPE_TUNNEL_GRENAT |
3935 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3936 RTE_PTYPE_INNER_L4_ICMP,
3937
3938 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3939 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3940 RTE_PTYPE_TUNNEL_GRENAT |
3941 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3942 RTE_PTYPE_INNER_L4_FRAG,
3943 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3944 RTE_PTYPE_TUNNEL_GRENAT |
3945 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3946 RTE_PTYPE_INNER_L4_NONFRAG,
3947 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3948 RTE_PTYPE_TUNNEL_GRENAT |
3949 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3950 RTE_PTYPE_INNER_L4_UDP,
3951 /* [120] reserved */
3952 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3953 RTE_PTYPE_TUNNEL_GRENAT |
3954 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3955 RTE_PTYPE_INNER_L4_TCP,
3956 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3957 RTE_PTYPE_TUNNEL_GRENAT |
3958 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3959 RTE_PTYPE_INNER_L4_SCTP,
3960 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3961 RTE_PTYPE_TUNNEL_GRENAT |
3962 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3963 RTE_PTYPE_INNER_L4_ICMP,
3964
3965 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3966 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3967 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3968
3969 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3970 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3971 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3972 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3973 RTE_PTYPE_INNER_L4_FRAG,
3974 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3975 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3976 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3977 RTE_PTYPE_INNER_L4_NONFRAG,
3978 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3979 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3980 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3981 RTE_PTYPE_INNER_L4_UDP,
3982 /* [128] reserved */
3983 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3984 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3985 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3986 RTE_PTYPE_INNER_L4_TCP,
3987 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3988 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3989 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3990 RTE_PTYPE_INNER_L4_SCTP,
3991 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3992 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3993 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3994 RTE_PTYPE_INNER_L4_ICMP,
3995
3996 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3997 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3998 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3999 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4000 RTE_PTYPE_INNER_L4_FRAG,
4001 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4002 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4003 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4004 RTE_PTYPE_INNER_L4_NONFRAG,
4005 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4006 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4007 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4008 RTE_PTYPE_INNER_L4_UDP,
4009 /* [135] reserved */
4010 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4011 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4012 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4013 RTE_PTYPE_INNER_L4_TCP,
4014 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4015 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4016 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4017 RTE_PTYPE_INNER_L4_SCTP,
4018 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4019 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4020 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4021 RTE_PTYPE_INNER_L4_ICMP,
4022 /* [139] - [299] reserved */
4023
4024 /* PPPoE */
4025 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
4026 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
4027
4028 /* PPPoE --> IPv4 */
4029 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
4030 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4031 RTE_PTYPE_L4_FRAG,
4032 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
4033 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4034 RTE_PTYPE_L4_NONFRAG,
4035 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
4036 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4037 RTE_PTYPE_L4_UDP,
4038 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
4039 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4040 RTE_PTYPE_L4_TCP,
4041 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
4042 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4043 RTE_PTYPE_L4_SCTP,
4044 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
4045 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4046 RTE_PTYPE_L4_ICMP,
4047
4048 /* PPPoE --> IPv6 */
4049 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
4050 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4051 RTE_PTYPE_L4_FRAG,
4052 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
4053 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4054 RTE_PTYPE_L4_NONFRAG,
4055 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
4056 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4057 RTE_PTYPE_L4_UDP,
4058 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
4059 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4060 RTE_PTYPE_L4_TCP,
4061 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
4062 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4063 RTE_PTYPE_L4_SCTP,
4064 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
4065 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4066 RTE_PTYPE_L4_ICMP,
4067 /* [314] - [324] reserved */
4068
4069 /* IPv4/IPv6 --> GTPC/GTPU */
4070 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4071 RTE_PTYPE_TUNNEL_GTPC,
4072 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4073 RTE_PTYPE_TUNNEL_GTPC,
4074 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4075 RTE_PTYPE_TUNNEL_GTPC,
4076 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4077 RTE_PTYPE_TUNNEL_GTPC,
4078 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4079 RTE_PTYPE_TUNNEL_GTPU,
4080 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4081 RTE_PTYPE_TUNNEL_GTPU,
4082
4083 /* IPv4 --> GTPU --> IPv4 */
4084 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4085 RTE_PTYPE_TUNNEL_GTPU |
4086 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4087 RTE_PTYPE_INNER_L4_FRAG,
4088 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4089 RTE_PTYPE_TUNNEL_GTPU |
4090 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4091 RTE_PTYPE_INNER_L4_NONFRAG,
4092 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4093 RTE_PTYPE_TUNNEL_GTPU |
4094 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4095 RTE_PTYPE_INNER_L4_UDP,
4096 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4097 RTE_PTYPE_TUNNEL_GTPU |
4098 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4099 RTE_PTYPE_INNER_L4_TCP,
4100 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4101 RTE_PTYPE_TUNNEL_GTPU |
4102 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4103 RTE_PTYPE_INNER_L4_ICMP,
4104
4105 /* IPv6 --> GTPU --> IPv4 */
4106 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4107 RTE_PTYPE_TUNNEL_GTPU |
4108 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4109 RTE_PTYPE_INNER_L4_FRAG,
4110 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4111 RTE_PTYPE_TUNNEL_GTPU |
4112 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4113 RTE_PTYPE_INNER_L4_NONFRAG,
4114 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4115 RTE_PTYPE_TUNNEL_GTPU |
4116 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4117 RTE_PTYPE_INNER_L4_UDP,
4118 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4119 RTE_PTYPE_TUNNEL_GTPU |
4120 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4121 RTE_PTYPE_INNER_L4_TCP,
4122 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4123 RTE_PTYPE_TUNNEL_GTPU |
4124 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4125 RTE_PTYPE_INNER_L4_ICMP,
4126
4127 /* IPv4 --> GTPU --> IPv6 */
4128 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4129 RTE_PTYPE_TUNNEL_GTPU |
4130 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4131 RTE_PTYPE_INNER_L4_FRAG,
4132 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4133 RTE_PTYPE_TUNNEL_GTPU |
4134 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4135 RTE_PTYPE_INNER_L4_NONFRAG,
4136 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4137 RTE_PTYPE_TUNNEL_GTPU |
4138 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4139 RTE_PTYPE_INNER_L4_UDP,
4140 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4141 RTE_PTYPE_TUNNEL_GTPU |
4142 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4143 RTE_PTYPE_INNER_L4_TCP,
4144 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4145 RTE_PTYPE_TUNNEL_GTPU |
4146 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4147 RTE_PTYPE_INNER_L4_ICMP,
4148
4149 /* IPv6 --> GTPU --> IPv6 */
4150 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4151 RTE_PTYPE_TUNNEL_GTPU |
4152 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4153 RTE_PTYPE_INNER_L4_FRAG,
4154 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4155 RTE_PTYPE_TUNNEL_GTPU |
4156 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4157 RTE_PTYPE_INNER_L4_NONFRAG,
4158 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4159 RTE_PTYPE_TUNNEL_GTPU |
4160 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4161 RTE_PTYPE_INNER_L4_UDP,
4162 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4163 RTE_PTYPE_TUNNEL_GTPU |
4164 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4165 RTE_PTYPE_INNER_L4_TCP,
4166 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4167 RTE_PTYPE_TUNNEL_GTPU |
4168 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4169 RTE_PTYPE_INNER_L4_ICMP,
4170
4171 /* IPv4 --> UDP ECPRI */
4172 [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4173 RTE_PTYPE_L4_UDP,
4174 [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4175 RTE_PTYPE_L4_UDP,
4176 [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4177 RTE_PTYPE_L4_UDP,
4178 [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4179 RTE_PTYPE_L4_UDP,
4180 [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4181 RTE_PTYPE_L4_UDP,
4182 [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4183 RTE_PTYPE_L4_UDP,
4184 [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4185 RTE_PTYPE_L4_UDP,
4186 [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4187 RTE_PTYPE_L4_UDP,
4188 [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4189 RTE_PTYPE_L4_UDP,
4190 [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4191 RTE_PTYPE_L4_UDP,
4192
4193 /* IPV6 --> UDP ECPRI */
4194 [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4195 RTE_PTYPE_L4_UDP,
4196 [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4197 RTE_PTYPE_L4_UDP,
4198 [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4199 RTE_PTYPE_L4_UDP,
4200 [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4201 RTE_PTYPE_L4_UDP,
4202 [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4203 RTE_PTYPE_L4_UDP,
4204 [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4205 RTE_PTYPE_L4_UDP,
4206 [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4207 RTE_PTYPE_L4_UDP,
4208 [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4209 RTE_PTYPE_L4_UDP,
4210 [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4211 RTE_PTYPE_L4_UDP,
4212 [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4213 RTE_PTYPE_L4_UDP,
4214 /* All others reserved */
4215 };
4216
4217 return type_table[ptype];
4218 }
4219
4220 void __rte_cold
4221 ice_set_default_ptype_table(struct rte_eth_dev *dev)
4222 {
4223 struct ice_adapter *ad =
4224 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
4225 int i;
4226
4227 for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
4228 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
4229 }
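/*
 * Illustrative sketch, not part of the upstream driver: the Rx paths use
 * ad->ptype_tbl[] to translate the hardware ptype index into
 * mbuf->packet_type, so applications can branch on the decoded value,
 * for example to spot IPv4/TCP packets. The function name is
 * hypothetical.
 */
static __rte_unused int
example_is_ipv4_tcp(const struct rte_mbuf *m)
{
	/* First/outer L3 header is IPv4 (with or without extensions)? */
	if (!RTE_ETH_IS_IPV4_HDR(m->packet_type))
		return 0;

	/* L4 header is TCP? */
	return (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP;
}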
4230
4231 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
4232 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
4233 (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
4234 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
4235 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
4236
4237 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4
4238 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \
4239 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
4240 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5
4241 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \
4242 (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
4243
4244 /*
4245 * Check the programming status descriptor in the FDIR Rx queue.
4246 * Called after a Flow Director rule has been programmed through
4247 * the FDIR Tx queue.
4248 */
4249 static inline int
4250 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
4251 {
4252 volatile union ice_32byte_rx_desc *rxdp;
4253 uint64_t qword1;
4254 uint32_t rx_status;
4255 uint32_t error;
4256 uint32_t id;
4257 int ret = -EAGAIN;
4258
4259 rxdp = (volatile union ice_32byte_rx_desc *)
4260 (&rxq->rx_ring[rxq->rx_tail]);
4261 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
4262 rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
4263 >> ICE_RXD_QW1_STATUS_S;
4264
4265 if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
4266 ret = 0;
4267 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
4268 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
4269 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
4270 ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
4271 if (error) {
4272 if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
4273 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
4274 else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
4275 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
4276 ret = -EINVAL;
4277 goto err;
4278 }
4279 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
4280 ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
4281 if (error) {
4282 PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
4283 ret = -EINVAL;
4284 }
4285 err:
4286 rxdp->wb.qword1.status_error_len = 0;
4287 rxq->rx_tail++;
4288 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
4289 rxq->rx_tail = 0;
4290 if (rxq->rx_tail == 0)
4291 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
4292 else
4293 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
4294 }
4295
4296 return ret;
4297 }
4298
4299 #define ICE_FDIR_MAX_WAIT_US 10000
4300
4301 int
4302 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
4303 {
4304 struct ice_tx_queue *txq = pf->fdir.txq;
4305 struct ice_rx_queue *rxq = pf->fdir.rxq;
4306 volatile struct ice_fltr_desc *fdirdp;
4307 volatile struct ice_tx_desc *txdp;
4308 uint32_t td_cmd;
4309 uint16_t i;
4310
4311 fdirdp = (volatile struct ice_fltr_desc *)
4312 (&txq->tx_ring[txq->tx_tail]);
4313 fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
4314 fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
4315
4316 txdp = &txq->tx_ring[txq->tx_tail + 1];
4317 txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
4318 td_cmd = ICE_TX_DESC_CMD_EOP |
4319 ICE_TX_DESC_CMD_RS |
4320 ICE_TX_DESC_CMD_DUMMY;
4321
4322 txdp->cmd_type_offset_bsz =
4323 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
4324
4325 txq->tx_tail += 2;
4326 if (txq->tx_tail >= txq->nb_tx_desc)
4327 txq->tx_tail = 0;
4328 /* Update the tx tail register */
4329 ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
4330 for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
4331 if ((txdp->cmd_type_offset_bsz &
4332 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
4333 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
4334 break;
4335 rte_delay_us(1);
4336 }
4337 if (i >= ICE_FDIR_MAX_WAIT_US) {
4338 PMD_DRV_LOG(ERR,
4339 "Failed to program FDIR filter: time out to get DD on tx queue.");
4340 return -ETIMEDOUT;
4341 }
4342
4343 for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4344 int ret;
4345
4346 ret = ice_check_fdir_programming_status(rxq);
4347 if (ret == -EAGAIN)
4348 rte_delay_us(1);
4349 else
4350 return ret;
4351 }
4352
4353 PMD_DRV_LOG(ERR,
4354 "Failed to program FDIR filter: programming status reported.");
4355 return -ETIMEDOUT;
4356
4357
4358 }
4359