1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
3 */
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <stdarg.h>
11 #include <unistd.h>
12 #include <inttypes.h>
13 #include <sys/queue.h>
14
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
17 #include <rte_mbuf.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
21 #include <rte_tcp.h>
22 #include <rte_sctp.h>
23 #include <rte_udp.h>
24 #include <rte_ip.h>
25 #include <rte_net.h>
26 #include <rte_vect.h>
27
28 #include "iavf.h"
29 #include "iavf_rxtx.h"
30 #include "rte_pmd_iavf.h"
31
32 /* Offset of mbuf dynamic field for protocol extraction's metadata */
33 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
34
35 /* Mask of mbuf dynamic flags for protocol extraction's type */
36 uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
37 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
38 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
39 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
40 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
41 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
42
43 uint8_t
44 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
45 {
46 static uint8_t rxdid_map[] = {
47 [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1,
48 [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN,
49 [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4,
50 [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6,
51 [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
52 [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
53 [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
54 };
55
56 return flex_type < RTE_DIM(rxdid_map) ?
57 rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
58 }
59
60 static inline int
61 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
62 {
63 /* The following constraints must be satisfied:
64 * thresh < rxq->nb_rx_desc
65 */
66 if (thresh >= nb_desc) {
67 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
68 thresh, nb_desc);
69 return -EINVAL;
70 }
71 return 0;
72 }
73
74 static inline int
75 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
76 uint16_t tx_free_thresh)
77 {
78 /* TX descriptors will have their RS bit set after tx_rs_thresh
79 * descriptors have been used. The TX descriptor ring will be cleaned
80 * after tx_free_thresh descriptors are used or if the number of
81 * descriptors required to transmit a packet is greater than the
82 * number of free TX descriptors.
83 *
84 * The following constraints must be satisfied:
85 * - tx_rs_thresh must be less than the size of the ring minus 2.
86 * - tx_free_thresh must be less than the size of the ring minus 3.
87 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
88 * - tx_rs_thresh must be a divisor of the ring size.
89 *
90 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
91 * race condition, hence the maximum threshold constraints. When set
92 * to zero use default values.
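	 *
	 * As a worked example: with nb_desc = 512, the pair tx_rs_thresh = 32
	 * and tx_free_thresh = 32 satisfies every constraint above
	 * (32 < 510, 32 < 509, 32 <= 32, and 512 % 32 == 0).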
93 */
94 if (tx_rs_thresh >= (nb_desc - 2)) {
95 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
96 "number of TX descriptors (%u) minus 2",
97 tx_rs_thresh, nb_desc);
98 return -EINVAL;
99 }
100 if (tx_free_thresh >= (nb_desc - 3)) {
101 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
102 "number of TX descriptors (%u) minus 3.",
103 tx_free_thresh, nb_desc);
104 return -EINVAL;
105 }
106 if (tx_rs_thresh > tx_free_thresh) {
107 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
108 "equal to tx_free_thresh (%u).",
109 tx_rs_thresh, tx_free_thresh);
110 return -EINVAL;
111 }
112 if ((nb_desc % tx_rs_thresh) != 0) {
113 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
114 "number of TX descriptors (%u).",
115 tx_rs_thresh, nb_desc);
116 return -EINVAL;
117 }
118
119 return 0;
120 }
121
122 static inline bool
123 check_rx_vec_allow(struct iavf_rx_queue *rxq)
124 {
125 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
126 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
127 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
128 return true;
129 }
130
131 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
132 return false;
133 }
134
135 static inline bool
136 check_tx_vec_allow(struct iavf_tx_queue *txq)
137 {
138 if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
139 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
140 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
141 		PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
142 return true;
143 }
144 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
145 return false;
146 }
147
148 static inline bool
149 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
150 {
151 int ret = true;
152
153 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
154 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
155 "rxq->rx_free_thresh=%d, "
156 "IAVF_RX_MAX_BURST=%d",
157 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
158 ret = false;
159 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
160 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
161 "rxq->nb_rx_desc=%d, "
162 "rxq->rx_free_thresh=%d",
163 rxq->nb_rx_desc, rxq->rx_free_thresh);
164 ret = false;
165 }
166 return ret;
167 }
168
169 static inline void
170 reset_rx_queue(struct iavf_rx_queue *rxq)
171 {
172 uint16_t len;
173 uint32_t i;
174
175 if (!rxq)
176 return;
177
178 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
179
180 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
181 ((volatile char *)rxq->rx_ring)[i] = 0;
182
183 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
184
185 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
186 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
187
188 /* for rx bulk */
189 rxq->rx_nb_avail = 0;
190 rxq->rx_next_avail = 0;
191 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
192
193 rxq->rx_tail = 0;
194 rxq->nb_rx_hold = 0;
195 rxq->pkt_first_seg = NULL;
196 rxq->pkt_last_seg = NULL;
197 rxq->rxrearm_nb = 0;
198 rxq->rxrearm_start = 0;
199 }
200
201 static inline void
202 reset_tx_queue(struct iavf_tx_queue *txq)
203 {
204 struct iavf_tx_entry *txe;
205 uint32_t i, size;
206 uint16_t prev;
207
208 if (!txq) {
209 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
210 return;
211 }
212
213 txe = txq->sw_ring;
214 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
215 for (i = 0; i < size; i++)
216 ((volatile char *)txq->tx_ring)[i] = 0;
217
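	/* Mark every descriptor as already completed (DESC_DONE) so the
	 * first cleanup pass treats the whole ring as free, and link the
	 * software ring entries into a circular list through next_id.
	 */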
218 prev = (uint16_t)(txq->nb_tx_desc - 1);
219 for (i = 0; i < txq->nb_tx_desc; i++) {
220 txq->tx_ring[i].cmd_type_offset_bsz =
221 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
222 txe[i].mbuf = NULL;
223 txe[i].last_id = i;
224 txe[prev].next_id = i;
225 prev = i;
226 }
227
228 txq->tx_tail = 0;
229 txq->nb_used = 0;
230
231 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
232 txq->nb_free = txq->nb_tx_desc - 1;
233
234 txq->next_dd = txq->rs_thresh - 1;
235 txq->next_rs = txq->rs_thresh - 1;
236 }
237
238 static int
239 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
240 {
241 volatile union iavf_rx_desc *rxd;
242 struct rte_mbuf *mbuf = NULL;
243 uint64_t dma_addr;
244 uint16_t i;
245
246 for (i = 0; i < rxq->nb_rx_desc; i++) {
247 mbuf = rte_mbuf_raw_alloc(rxq->mp);
248 if (unlikely(!mbuf)) {
249 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
250 return -ENOMEM;
251 }
252
253 rte_mbuf_refcnt_set(mbuf, 1);
254 mbuf->next = NULL;
255 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
256 mbuf->nb_segs = 1;
257 mbuf->port = rxq->port_id;
258
259 dma_addr =
260 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
261
262 rxd = &rxq->rx_ring[i];
263 rxd->read.pkt_addr = dma_addr;
264 rxd->read.hdr_addr = 0;
265 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
266 rxd->read.rsvd1 = 0;
267 rxd->read.rsvd2 = 0;
268 #endif
269
270 rxq->sw_ring[i] = mbuf;
271 }
272
273 return 0;
274 }
275
276 static inline void
277 release_rxq_mbufs(struct iavf_rx_queue *rxq)
278 {
279 uint16_t i;
280
281 if (!rxq->sw_ring)
282 return;
283
284 for (i = 0; i < rxq->nb_rx_desc; i++) {
285 if (rxq->sw_ring[i]) {
286 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
287 rxq->sw_ring[i] = NULL;
288 }
289 }
290
291 /* for rx bulk */
292 if (rxq->rx_nb_avail == 0)
293 return;
294 for (i = 0; i < rxq->rx_nb_avail; i++) {
295 struct rte_mbuf *mbuf;
296
297 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
298 rte_pktmbuf_free_seg(mbuf);
299 }
300 rxq->rx_nb_avail = 0;
301 }
302
303 static inline void
304 release_txq_mbufs(struct iavf_tx_queue *txq)
305 {
306 uint16_t i;
307
308 if (!txq || !txq->sw_ring) {
309 		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
310 return;
311 }
312
313 for (i = 0; i < txq->nb_tx_desc; i++) {
314 if (txq->sw_ring[i].mbuf) {
315 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
316 txq->sw_ring[i].mbuf = NULL;
317 }
318 }
319 }
320
321 static const struct iavf_rxq_ops def_rxq_ops = {
322 .release_mbufs = release_rxq_mbufs,
323 };
324
325 static const struct iavf_txq_ops def_txq_ops = {
326 .release_mbufs = release_txq_mbufs,
327 };
328
329 static inline void
330 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
331 struct rte_mbuf *mb,
332 volatile union iavf_rx_flex_desc *rxdp)
333 {
334 volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
335 (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
336 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
337 uint16_t stat_err;
338 #endif
339
340 if (desc->flow_id != 0xFFFFFFFF) {
341 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
342 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
343 }
344
345 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
346 stat_err = rte_le_to_cpu_16(desc->status_error0);
347 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
348 mb->ol_flags |= PKT_RX_RSS_HASH;
349 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
350 }
351 #endif
352 }
353
354 static inline void
355 iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
356 struct rte_mbuf *mb,
357 volatile union iavf_rx_flex_desc *rxdp)
358 {
359 volatile struct iavf_32b_rx_flex_desc_comms *desc =
360 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
361 uint16_t stat_err;
362
363 stat_err = rte_le_to_cpu_16(desc->status_error0);
364 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
365 mb->ol_flags |= PKT_RX_RSS_HASH;
366 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
367 }
368
369 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
370 if (desc->flow_id != 0xFFFFFFFF) {
371 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
372 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
373 }
374
375 if (rxq->xtr_ol_flag) {
376 uint32_t metadata = 0;
377
378 stat_err = rte_le_to_cpu_16(desc->status_error1);
379
380 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
381 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
382
383 if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
384 metadata |=
385 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
386
387 if (metadata) {
388 mb->ol_flags |= rxq->xtr_ol_flag;
389
390 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
391 }
392 }
393 #endif
394 }
395
396 static inline void
397 iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
398 struct rte_mbuf *mb,
399 volatile union iavf_rx_flex_desc *rxdp)
400 {
401 volatile struct iavf_32b_rx_flex_desc_comms *desc =
402 (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
403 uint16_t stat_err;
404
405 stat_err = rte_le_to_cpu_16(desc->status_error0);
406 if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
407 mb->ol_flags |= PKT_RX_RSS_HASH;
408 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
409 }
410
411 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
412 if (desc->flow_id != 0xFFFFFFFF) {
413 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
414 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
415 }
416
417 if (rxq->xtr_ol_flag) {
418 uint32_t metadata = 0;
419
420 if (desc->flex_ts.flex.aux0 != 0xFFFF)
421 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
422 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
423 metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
424
425 if (metadata) {
426 mb->ol_flags |= rxq->xtr_ol_flag;
427
428 *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
429 }
430 }
431 #endif
432 }
433
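/* Bind the RXDID-specific descriptor-to-mbuf translation callback to the
 * queue and, for the AUX descriptor formats, the protocol extraction
 * ol_flag to report; the flag is cleared if the metadata dynfield was
 * never registered.
 */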
434 static void
435 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
436 {
437 switch (rxdid) {
438 case IAVF_RXDID_COMMS_AUX_VLAN:
439 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
440 rxq->rxd_to_pkt_fields =
441 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
442 break;
443 case IAVF_RXDID_COMMS_AUX_IPV4:
444 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
445 rxq->rxd_to_pkt_fields =
446 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
447 break;
448 case IAVF_RXDID_COMMS_AUX_IPV6:
449 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
450 rxq->rxd_to_pkt_fields =
451 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
452 break;
453 case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
454 rxq->xtr_ol_flag =
455 rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
456 rxq->rxd_to_pkt_fields =
457 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
458 break;
459 case IAVF_RXDID_COMMS_AUX_TCP:
460 rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
461 rxq->rxd_to_pkt_fields =
462 iavf_rxd_to_pkt_fields_by_comms_aux_v1;
463 break;
464 case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
465 rxq->xtr_ol_flag =
466 rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
467 rxq->rxd_to_pkt_fields =
468 iavf_rxd_to_pkt_fields_by_comms_aux_v2;
469 break;
470 case IAVF_RXDID_COMMS_OVS_1:
471 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
472 break;
473 default:
474 /* update this according to the RXDID for FLEX_DESC_NONE */
475 rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
476 break;
477 }
478
479 if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
480 rxq->xtr_ol_flag = 0;
481 }
482
483 int
484 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
485 uint16_t nb_desc, unsigned int socket_id,
486 const struct rte_eth_rxconf *rx_conf,
487 struct rte_mempool *mp)
488 {
489 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
490 struct iavf_adapter *ad =
491 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
492 struct iavf_info *vf =
493 IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
494 struct iavf_vsi *vsi = &vf->vsi;
495 struct iavf_rx_queue *rxq;
496 const struct rte_memzone *mz;
497 uint32_t ring_size;
498 uint8_t proto_xtr;
499 uint16_t len;
500 uint16_t rx_free_thresh;
501
502 PMD_INIT_FUNC_TRACE();
503
504 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
505 nb_desc > IAVF_MAX_RING_DESC ||
506 nb_desc < IAVF_MIN_RING_DESC) {
507 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
508 "invalid", nb_desc);
509 return -EINVAL;
510 }
511
512 /* Check free threshold */
513 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
514 IAVF_DEFAULT_RX_FREE_THRESH :
515 rx_conf->rx_free_thresh;
516 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
517 return -EINVAL;
518
519 /* Free memory if needed */
520 if (dev->data->rx_queues[queue_idx]) {
521 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
522 dev->data->rx_queues[queue_idx] = NULL;
523 }
524
525 /* Allocate the rx queue data structure */
526 rxq = rte_zmalloc_socket("iavf rxq",
527 sizeof(struct iavf_rx_queue),
528 RTE_CACHE_LINE_SIZE,
529 socket_id);
530 if (!rxq) {
531 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
532 "rx queue data structure");
533 return -ENOMEM;
534 }
535
536 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
537 proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
538 IAVF_PROTO_XTR_NONE;
539 rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
540 rxq->proto_xtr = proto_xtr;
541 } else {
542 rxq->rxdid = IAVF_RXDID_LEGACY_1;
543 rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
544 }
545
546 iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
547
548 rxq->mp = mp;
549 rxq->nb_rx_desc = nb_desc;
550 rxq->rx_free_thresh = rx_free_thresh;
551 rxq->queue_id = queue_idx;
552 rxq->port_id = dev->data->port_id;
553 rxq->crc_len = 0; /* crc stripping by default */
554 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
555 rxq->rx_hdr_len = 0;
556 rxq->vsi = vsi;
557
558 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
559 rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
560
561 /* Allocate the software ring. */
562 len = nb_desc + IAVF_RX_MAX_BURST;
563 rxq->sw_ring =
564 rte_zmalloc_socket("iavf rx sw ring",
565 sizeof(struct rte_mbuf *) * len,
566 RTE_CACHE_LINE_SIZE,
567 socket_id);
568 if (!rxq->sw_ring) {
569 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
570 rte_free(rxq);
571 return -ENOMEM;
572 }
573
574 	/* Allocate the maximum number of RX ring hardware descriptors with
575 	 * a little more to support bulk allocation.
576 */
577 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
578 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
579 IAVF_DMA_MEM_ALIGN);
580 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
581 ring_size, IAVF_RING_BASE_ALIGN,
582 socket_id);
583 if (!mz) {
584 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
585 rte_free(rxq->sw_ring);
586 rte_free(rxq);
587 return -ENOMEM;
588 }
589 /* Zero all the descriptors in the ring. */
590 memset(mz->addr, 0, ring_size);
591 rxq->rx_ring_phys_addr = mz->iova;
592 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
593
594 rxq->mz = mz;
595 reset_rx_queue(rxq);
596 rxq->q_set = true;
597 dev->data->rx_queues[queue_idx] = rxq;
598 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
599 rxq->ops = &def_rxq_ops;
600
601 if (check_rx_bulk_allow(rxq) == true) {
602 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
603 "satisfied. Rx Burst Bulk Alloc function will be "
604 "used on port=%d, queue=%d.",
605 rxq->port_id, rxq->queue_id);
606 } else {
607 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
608 "not satisfied, Scattered Rx is requested "
609 "on port=%d, queue=%d.",
610 rxq->port_id, rxq->queue_id);
611 ad->rx_bulk_alloc_allowed = false;
612 }
613
614 if (check_rx_vec_allow(rxq) == false)
615 ad->rx_vec_allowed = false;
616
617 return 0;
618 }
619
620 int
621 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
622 uint16_t queue_idx,
623 uint16_t nb_desc,
624 unsigned int socket_id,
625 const struct rte_eth_txconf *tx_conf)
626 {
627 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
628 struct iavf_tx_queue *txq;
629 const struct rte_memzone *mz;
630 uint32_t ring_size;
631 uint16_t tx_rs_thresh, tx_free_thresh;
632 uint64_t offloads;
633
634 PMD_INIT_FUNC_TRACE();
635
636 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
637
638 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
639 nb_desc > IAVF_MAX_RING_DESC ||
640 nb_desc < IAVF_MIN_RING_DESC) {
641 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
642 "invalid", nb_desc);
643 return -EINVAL;
644 }
645
646 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
647 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
648 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
649 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
650 	check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh);
651
652 /* Free memory if needed. */
653 if (dev->data->tx_queues[queue_idx]) {
654 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
655 dev->data->tx_queues[queue_idx] = NULL;
656 }
657
658 /* Allocate the TX queue data structure. */
659 txq = rte_zmalloc_socket("iavf txq",
660 sizeof(struct iavf_tx_queue),
661 RTE_CACHE_LINE_SIZE,
662 socket_id);
663 if (!txq) {
664 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
665 "tx queue structure");
666 return -ENOMEM;
667 }
668
669 txq->nb_tx_desc = nb_desc;
670 txq->rs_thresh = tx_rs_thresh;
671 txq->free_thresh = tx_free_thresh;
672 txq->queue_id = queue_idx;
673 txq->port_id = dev->data->port_id;
674 txq->offloads = offloads;
675 txq->tx_deferred_start = tx_conf->tx_deferred_start;
676
677 /* Allocate software ring */
678 txq->sw_ring =
679 rte_zmalloc_socket("iavf tx sw ring",
680 sizeof(struct iavf_tx_entry) * nb_desc,
681 RTE_CACHE_LINE_SIZE,
682 socket_id);
683 if (!txq->sw_ring) {
684 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
685 rte_free(txq);
686 return -ENOMEM;
687 }
688
689 /* Allocate TX hardware ring descriptors. */
690 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
691 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
692 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
693 ring_size, IAVF_RING_BASE_ALIGN,
694 socket_id);
695 if (!mz) {
696 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
697 rte_free(txq->sw_ring);
698 rte_free(txq);
699 return -ENOMEM;
700 }
701 txq->tx_ring_phys_addr = mz->iova;
702 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
703
704 txq->mz = mz;
705 reset_tx_queue(txq);
706 txq->q_set = true;
707 dev->data->tx_queues[queue_idx] = txq;
708 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
709 txq->ops = &def_txq_ops;
710
711 if (check_tx_vec_allow(txq) == false) {
712 struct iavf_adapter *ad =
713 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
714 ad->tx_vec_allowed = false;
715 }
716
717 return 0;
718 }
719
720 int
721 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
722 {
723 struct iavf_adapter *adapter =
724 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
725 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
726 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
727 struct iavf_rx_queue *rxq;
728 int err = 0;
729
730 PMD_DRV_FUNC_TRACE();
731
732 if (rx_queue_id >= dev->data->nb_rx_queues)
733 return -EINVAL;
734
735 rxq = dev->data->rx_queues[rx_queue_id];
736
737 err = alloc_rxq_mbufs(rxq);
738 if (err) {
739 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
740 return err;
741 }
742
743 rte_wmb();
744
745 /* Init the RX tail register. */
746 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
747 IAVF_WRITE_FLUSH(hw);
748
749 /* Ready to switch the queue on */
750 if (!vf->lv_enabled)
751 err = iavf_switch_queue(adapter, rx_queue_id, true, true);
752 else
753 err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
754
755 if (err)
756 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
757 rx_queue_id);
758 else
759 dev->data->rx_queue_state[rx_queue_id] =
760 RTE_ETH_QUEUE_STATE_STARTED;
761
762 return err;
763 }
764
765 int
766 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
767 {
768 struct iavf_adapter *adapter =
769 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
770 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
771 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
772 struct iavf_tx_queue *txq;
773 int err = 0;
774
775 PMD_DRV_FUNC_TRACE();
776
777 if (tx_queue_id >= dev->data->nb_tx_queues)
778 return -EINVAL;
779
780 txq = dev->data->tx_queues[tx_queue_id];
781
782 	/* Init the TX tail register. */
783 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
784 IAVF_WRITE_FLUSH(hw);
785
786 /* Ready to switch the queue on */
787 if (!vf->lv_enabled)
788 err = iavf_switch_queue(adapter, tx_queue_id, false, true);
789 else
790 err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
791
792 if (err)
793 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
794 tx_queue_id);
795 else
796 dev->data->tx_queue_state[tx_queue_id] =
797 RTE_ETH_QUEUE_STATE_STARTED;
798
799 return err;
800 }
801
802 int
803 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
804 {
805 struct iavf_adapter *adapter =
806 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
807 struct iavf_rx_queue *rxq;
808 int err;
809
810 PMD_DRV_FUNC_TRACE();
811
812 if (rx_queue_id >= dev->data->nb_rx_queues)
813 return -EINVAL;
814
815 err = iavf_switch_queue(adapter, rx_queue_id, true, false);
816 if (err) {
817 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
818 rx_queue_id);
819 return err;
820 }
821
822 rxq = dev->data->rx_queues[rx_queue_id];
823 rxq->ops->release_mbufs(rxq);
824 reset_rx_queue(rxq);
825 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
826
827 return 0;
828 }
829
830 int
831 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
832 {
833 struct iavf_adapter *adapter =
834 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
835 struct iavf_tx_queue *txq;
836 int err;
837
838 PMD_DRV_FUNC_TRACE();
839
840 if (tx_queue_id >= dev->data->nb_tx_queues)
841 return -EINVAL;
842
843 err = iavf_switch_queue(adapter, tx_queue_id, false, false);
844 if (err) {
845 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
846 tx_queue_id);
847 return err;
848 }
849
850 txq = dev->data->tx_queues[tx_queue_id];
851 txq->ops->release_mbufs(txq);
852 reset_tx_queue(txq);
853 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
854
855 return 0;
856 }
857
858 void
859 iavf_dev_rx_queue_release(void *rxq)
860 {
861 struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
862
863 if (!q)
864 return;
865
866 q->ops->release_mbufs(q);
867 rte_free(q->sw_ring);
868 rte_memzone_free(q->mz);
869 rte_free(q);
870 }
871
872 void
873 iavf_dev_tx_queue_release(void *txq)
874 {
875 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
876
877 if (!q)
878 return;
879
880 q->ops->release_mbufs(q);
881 rte_free(q->sw_ring);
882 rte_memzone_free(q->mz);
883 rte_free(q);
884 }
885
886 void
887 iavf_stop_queues(struct rte_eth_dev *dev)
888 {
889 struct iavf_adapter *adapter =
890 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
891 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
892 struct iavf_rx_queue *rxq;
893 struct iavf_tx_queue *txq;
894 int ret, i;
895
896 	/* Stop all queues */
897 if (!vf->lv_enabled) {
898 ret = iavf_disable_queues(adapter);
899 if (ret)
900 PMD_DRV_LOG(WARNING, "Fail to stop queues");
901 } else {
902 ret = iavf_disable_queues_lv(adapter);
903 if (ret)
904 PMD_DRV_LOG(WARNING, "Fail to stop queues for large VF");
905 }
906
907 if (ret)
908 PMD_DRV_LOG(WARNING, "Fail to stop queues");
909
910 for (i = 0; i < dev->data->nb_tx_queues; i++) {
911 txq = dev->data->tx_queues[i];
912 if (!txq)
913 continue;
914 txq->ops->release_mbufs(txq);
915 reset_tx_queue(txq);
916 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
917 }
918 for (i = 0; i < dev->data->nb_rx_queues; i++) {
919 rxq = dev->data->rx_queues[i];
920 if (!rxq)
921 continue;
922 rxq->ops->release_mbufs(rxq);
923 reset_rx_queue(rxq);
924 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
925 }
926 }
927
936 static inline void
937 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
938 {
939 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
940 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
941 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
942 mb->vlan_tci =
943 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
944 } else {
945 mb->vlan_tci = 0;
946 }
947 }
948
949 static inline void
950 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
951 volatile union iavf_rx_flex_desc *rxdp)
952 {
953 if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
954 (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
955 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
956 mb->vlan_tci =
957 rte_le_to_cpu_16(rxdp->wb.l2tag1);
958 } else {
959 mb->vlan_tci = 0;
960 }
961
962 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
963 if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
964 (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
965 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
966 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
967 mb->vlan_tci_outer = mb->vlan_tci;
968 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
969 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
970 rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
971 rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
972 } else {
973 mb->vlan_tci_outer = 0;
974 }
975 #endif
976 }
977
978 /* Translate the rx descriptor status and error fields to pkt flags */
979 static inline uint64_t
980 iavf_rxd_to_pkt_flags(uint64_t qword)
981 {
982 uint64_t flags;
983 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
984
985 #define IAVF_RX_ERR_BITS 0x3f
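	/* 0x3f masks the six least-significant bits of the shifted error
	 * field; only the IP and L4 checksum errors are then mapped to
	 * flags, and the oversize error is left unhandled (see the TODO
	 * at the end of this function).
	 */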
986
987 /* Check if RSS_HASH */
988 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
989 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
990 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
991
992 /* Check if FDIR Match */
993 flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
994 PKT_RX_FDIR : 0);
995
996 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
997 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
998 return flags;
999 }
1000
1001 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
1002 flags |= PKT_RX_IP_CKSUM_BAD;
1003 else
1004 flags |= PKT_RX_IP_CKSUM_GOOD;
1005
1006 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
1007 flags |= PKT_RX_L4_CKSUM_BAD;
1008 else
1009 flags |= PKT_RX_L4_CKSUM_GOOD;
1010
1011 /* TODO: Oversize error bit is not processed here */
1012
1013 return flags;
1014 }
1015
1016 static inline uint64_t
1017 iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
1018 {
1019 uint64_t flags = 0;
1020 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
1021 uint16_t flexbh;
1022
1023 flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
1024 IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
1025 IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
1026
1027 if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
1028 mb->hash.fdir.hi =
1029 rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
1030 flags |= PKT_RX_FDIR_ID;
1031 }
1032 #else
1033 mb->hash.fdir.hi =
1034 rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
1035 flags |= PKT_RX_FDIR_ID;
1036 #endif
1037 return flags;
1038 }
1039
1040 #define IAVF_RX_FLEX_ERR0_BITS \
1041 ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
1042 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
1043 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
1044 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
1045 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1046 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
1047
1048 /* Rx L3/L4 checksum */
1049 static inline uint64_t
1050 iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
1051 {
1052 uint64_t flags = 0;
1053
1054 /* check if HW has decoded the packet and checksum */
1055 if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1056 return 0;
1057
1058 if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
1059 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1060 return flags;
1061 }
1062
1063 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1064 flags |= PKT_RX_IP_CKSUM_BAD;
1065 else
1066 flags |= PKT_RX_IP_CKSUM_GOOD;
1067
1068 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1069 flags |= PKT_RX_L4_CKSUM_BAD;
1070 else
1071 flags |= PKT_RX_L4_CKSUM_GOOD;
1072
1073 if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1074 flags |= PKT_RX_EIP_CKSUM_BAD;
1075
1076 return flags;
1077 }
1078
1079 /* If the number of free RX descriptors is greater than the RX free
1080 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1081 * register. Update the RDT with the value of the last processed RX
1082 * descriptor minus 1, to guarantee that the RDT register is never
1083 * equal to the RDH register, which creates a "full" ring situation
1084 * from the hardware point of view.
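 * For example, with rx_free_thresh = 32 the tail register is written
 * back only after more than 32 processed descriptors have accumulated,
 * which batches the MMIO doorbell writes.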
1085 */
1086 static inline void
1087 iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
1088 {
1089 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1090
1091 if (nb_hold > rxq->rx_free_thresh) {
1092 PMD_RX_LOG(DEBUG,
1093 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
1094 rxq->port_id, rxq->queue_id, rx_id, nb_hold);
1095 rx_id = (uint16_t)((rx_id == 0) ?
1096 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1097 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1098 nb_hold = 0;
1099 }
1100 rxq->nb_rx_hold = nb_hold;
1101 }
1102
1103 /* implement recv_pkts */
1104 uint16_t
1105 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1106 {
1107 volatile union iavf_rx_desc *rx_ring;
1108 volatile union iavf_rx_desc *rxdp;
1109 struct iavf_rx_queue *rxq;
1110 union iavf_rx_desc rxd;
1111 struct rte_mbuf *rxe;
1112 struct rte_eth_dev *dev;
1113 struct rte_mbuf *rxm;
1114 struct rte_mbuf *nmb;
1115 uint16_t nb_rx;
1116 uint32_t rx_status;
1117 uint64_t qword1;
1118 uint16_t rx_packet_len;
1119 uint16_t rx_id, nb_hold;
1120 uint64_t dma_addr;
1121 uint64_t pkt_flags;
1122 const uint32_t *ptype_tbl;
1123
1124 nb_rx = 0;
1125 nb_hold = 0;
1126 rxq = rx_queue;
1127 rx_id = rxq->rx_tail;
1128 rx_ring = rxq->rx_ring;
1129 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1130
1131 while (nb_rx < nb_pkts) {
1132 rxdp = &rx_ring[rx_id];
1133 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1134 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1135 IAVF_RXD_QW1_STATUS_SHIFT;
1136
1137 /* Check the DD bit first */
1138 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1139 break;
1140 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1141
1142 nmb = rte_mbuf_raw_alloc(rxq->mp);
1143 if (unlikely(!nmb)) {
1144 dev = &rte_eth_devices[rxq->port_id];
1145 dev->data->rx_mbuf_alloc_failed++;
1146 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1147 "queue_id=%u", rxq->port_id, rxq->queue_id);
1148 break;
1149 }
1150
1151 rxd = *rxdp;
1152 nb_hold++;
1153 rxe = rxq->sw_ring[rx_id];
1154 rx_id++;
1155 if (unlikely(rx_id == rxq->nb_rx_desc))
1156 rx_id = 0;
1157
1158 /* Prefetch next mbuf */
1159 rte_prefetch0(rxq->sw_ring[rx_id]);
1160
1161 /* When next RX descriptor is on a cache line boundary,
1162 * prefetch the next 4 RX descriptors and next 8 pointers
1163 * to mbufs.
1164 */
1165 if ((rx_id & 0x3) == 0) {
1166 rte_prefetch0(&rx_ring[rx_id]);
1167 rte_prefetch0(rxq->sw_ring[rx_id]);
1168 }
1169 rxm = rxe;
1170 dma_addr =
1171 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1172 rxdp->read.hdr_addr = 0;
1173 rxdp->read.pkt_addr = dma_addr;
1174
1175 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1176 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1177
1178 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1179 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1180 rxm->nb_segs = 1;
1181 rxm->next = NULL;
1182 rxm->pkt_len = rx_packet_len;
1183 rxm->data_len = rx_packet_len;
1184 rxm->port = rxq->port_id;
1185 rxm->ol_flags = 0;
1186 iavf_rxd_to_vlan_tci(rxm, &rxd);
1187 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1188 rxm->packet_type =
1189 ptype_tbl[(uint8_t)((qword1 &
1190 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1191
1192 if (pkt_flags & PKT_RX_RSS_HASH)
1193 rxm->hash.rss =
1194 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1195
1196 if (pkt_flags & PKT_RX_FDIR)
1197 pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
1198
1199 rxm->ol_flags |= pkt_flags;
1200
1201 rx_pkts[nb_rx++] = rxm;
1202 }
1203 rxq->rx_tail = rx_id;
1204
1205 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1206
1207 return nb_rx;
1208 }
1209
1210 /* implement recv_pkts for flexible Rx descriptor */
1211 uint16_t
1212 iavf_recv_pkts_flex_rxd(void *rx_queue,
1213 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1214 {
1215 volatile union iavf_rx_desc *rx_ring;
1216 volatile union iavf_rx_flex_desc *rxdp;
1217 struct iavf_rx_queue *rxq;
1218 union iavf_rx_flex_desc rxd;
1219 struct rte_mbuf *rxe;
1220 struct rte_eth_dev *dev;
1221 struct rte_mbuf *rxm;
1222 struct rte_mbuf *nmb;
1223 uint16_t nb_rx;
1224 uint16_t rx_stat_err0;
1225 uint16_t rx_packet_len;
1226 uint16_t rx_id, nb_hold;
1227 uint64_t dma_addr;
1228 uint64_t pkt_flags;
1229 const uint32_t *ptype_tbl;
1230
1231 nb_rx = 0;
1232 nb_hold = 0;
1233 rxq = rx_queue;
1234 rx_id = rxq->rx_tail;
1235 rx_ring = rxq->rx_ring;
1236 ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1237
1238 while (nb_rx < nb_pkts) {
1239 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1240 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1241
1242 /* Check the DD bit first */
1243 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1244 break;
1245 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1246
1247 nmb = rte_mbuf_raw_alloc(rxq->mp);
1248 if (unlikely(!nmb)) {
1249 dev = &rte_eth_devices[rxq->port_id];
1250 dev->data->rx_mbuf_alloc_failed++;
1251 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1252 "queue_id=%u", rxq->port_id, rxq->queue_id);
1253 break;
1254 }
1255
1256 rxd = *rxdp;
1257 nb_hold++;
1258 rxe = rxq->sw_ring[rx_id];
1259 rx_id++;
1260 if (unlikely(rx_id == rxq->nb_rx_desc))
1261 rx_id = 0;
1262
1263 /* Prefetch next mbuf */
1264 rte_prefetch0(rxq->sw_ring[rx_id]);
1265
1266 /* When next RX descriptor is on a cache line boundary,
1267 * prefetch the next 4 RX descriptors and next 8 pointers
1268 * to mbufs.
1269 */
1270 if ((rx_id & 0x3) == 0) {
1271 rte_prefetch0(&rx_ring[rx_id]);
1272 rte_prefetch0(rxq->sw_ring[rx_id]);
1273 }
1274 rxm = rxe;
1275 dma_addr =
1276 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1277 rxdp->read.hdr_addr = 0;
1278 rxdp->read.pkt_addr = dma_addr;
1279
1280 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
1281 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1282
1283 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1284 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
1285 rxm->nb_segs = 1;
1286 rxm->next = NULL;
1287 rxm->pkt_len = rx_packet_len;
1288 rxm->data_len = rx_packet_len;
1289 rxm->port = rxq->port_id;
1290 rxm->ol_flags = 0;
1291 rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1292 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1293 iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
1294 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
1295 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1296 rxm->ol_flags |= pkt_flags;
1297
1298 rx_pkts[nb_rx++] = rxm;
1299 }
1300 rxq->rx_tail = rx_id;
1301
1302 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1303
1304 return nb_rx;
1305 }
1306
1307 /* implement recv_scattered_pkts for flexible Rx descriptor */
1308 uint16_t
1309 iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
1310 uint16_t nb_pkts)
1311 {
1312 struct iavf_rx_queue *rxq = rx_queue;
1313 union iavf_rx_flex_desc rxd;
1314 struct rte_mbuf *rxe;
1315 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1316 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1317 struct rte_mbuf *nmb, *rxm;
1318 uint16_t rx_id = rxq->rx_tail;
1319 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1320 struct rte_eth_dev *dev;
1321 uint16_t rx_stat_err0;
1322 uint64_t dma_addr;
1323 uint64_t pkt_flags;
1324
1325 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1326 volatile union iavf_rx_flex_desc *rxdp;
1327 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1328
1329 while (nb_rx < nb_pkts) {
1330 rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
1331 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1332
1333 /* Check the DD bit */
1334 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1335 break;
1336 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1337
1338 nmb = rte_mbuf_raw_alloc(rxq->mp);
1339 if (unlikely(!nmb)) {
1340 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1341 "queue_id=%u", rxq->port_id, rxq->queue_id);
1342 dev = &rte_eth_devices[rxq->port_id];
1343 dev->data->rx_mbuf_alloc_failed++;
1344 break;
1345 }
1346
1347 rxd = *rxdp;
1348 nb_hold++;
1349 rxe = rxq->sw_ring[rx_id];
1350 rx_id++;
1351 if (rx_id == rxq->nb_rx_desc)
1352 rx_id = 0;
1353
1354 /* Prefetch next mbuf */
1355 rte_prefetch0(rxq->sw_ring[rx_id]);
1356
1357 /* When next RX descriptor is on a cache line boundary,
1358 * prefetch the next 4 RX descriptors and next 8 pointers
1359 * to mbufs.
1360 */
1361 if ((rx_id & 0x3) == 0) {
1362 rte_prefetch0(&rx_ring[rx_id]);
1363 rte_prefetch0(rxq->sw_ring[rx_id]);
1364 }
1365
1366 rxm = rxe;
1367 dma_addr =
1368 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1369
1370 /* Set data buffer address and data length of the mbuf */
1371 rxdp->read.hdr_addr = 0;
1372 rxdp->read.pkt_addr = dma_addr;
1373 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1374 IAVF_RX_FLX_DESC_PKT_LEN_M;
1375 rxm->data_len = rx_packet_len;
1376 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1377
1378 /* If this is the first buffer of the received packet, set the
1379 * pointer to the first mbuf of the packet and initialize its
1380 * context. Otherwise, update the total length and the number
1381 * of segments of the current scattered packet, and update the
1382 * pointer to the last mbuf of the current packet.
1383 */
1384 if (!first_seg) {
1385 first_seg = rxm;
1386 first_seg->nb_segs = 1;
1387 first_seg->pkt_len = rx_packet_len;
1388 } else {
1389 first_seg->pkt_len =
1390 (uint16_t)(first_seg->pkt_len +
1391 rx_packet_len);
1392 first_seg->nb_segs++;
1393 last_seg->next = rxm;
1394 }
1395
1396 /* If this is not the last buffer of the received packet,
1397 * update the pointer to the last mbuf of the current scattered
1398 * packet and continue to parse the RX ring.
1399 */
1400 if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
1401 last_seg = rxm;
1402 continue;
1403 }
1404
1405 /* This is the last buffer of the received packet. If the CRC
1406 * is not stripped by the hardware:
1407 * - Subtract the CRC length from the total packet length.
1408 * - If the last buffer only contains the whole CRC or a part
1409 * of it, free the mbuf associated to the last buffer. If part
1410 * of the CRC is also contained in the previous mbuf, subtract
1411 * the length of that CRC part from the data length of the
1412 * previous mbuf.
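 *
 * For example, if the 4-byte CRC is split 2/2 between the last two
 * buffers, the last mbuf is freed and the remaining 2 CRC bytes are
 * trimmed from the data length of the previous mbuf.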
1413 */
1414 rxm->next = NULL;
1415 if (unlikely(rxq->crc_len > 0)) {
1416 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1417 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1418 rte_pktmbuf_free_seg(rxm);
1419 first_seg->nb_segs--;
1420 last_seg->data_len =
1421 (uint16_t)(last_seg->data_len -
1422 (RTE_ETHER_CRC_LEN - rx_packet_len));
1423 last_seg->next = NULL;
1424 } else {
1425 rxm->data_len = (uint16_t)(rx_packet_len -
1426 RTE_ETHER_CRC_LEN);
1427 }
1428 }
1429
1430 first_seg->port = rxq->port_id;
1431 first_seg->ol_flags = 0;
1432 first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1433 rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1434 iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
1435 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1436 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
1437
1438 first_seg->ol_flags |= pkt_flags;
1439
1440 /* Prefetch data of first segment, if configured to do so. */
1441 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1442 first_seg->data_off));
1443 rx_pkts[nb_rx++] = first_seg;
1444 first_seg = NULL;
1445 }
1446
1447 /* Record index of the next RX descriptor to probe. */
1448 rxq->rx_tail = rx_id;
1449 rxq->pkt_first_seg = first_seg;
1450 rxq->pkt_last_seg = last_seg;
1451
1452 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1453
1454 return nb_rx;
1455 }
1456
1457 /* implement recv_scattered_pkts */
1458 uint16_t
1459 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1460 uint16_t nb_pkts)
1461 {
1462 struct iavf_rx_queue *rxq = rx_queue;
1463 union iavf_rx_desc rxd;
1464 struct rte_mbuf *rxe;
1465 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1466 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1467 struct rte_mbuf *nmb, *rxm;
1468 uint16_t rx_id = rxq->rx_tail;
1469 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
1470 struct rte_eth_dev *dev;
1471 uint32_t rx_status;
1472 uint64_t qword1;
1473 uint64_t dma_addr;
1474 uint64_t pkt_flags;
1475
1476 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
1477 volatile union iavf_rx_desc *rxdp;
1478 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1479
1480 while (nb_rx < nb_pkts) {
1481 rxdp = &rx_ring[rx_id];
1482 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1483 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1484 IAVF_RXD_QW1_STATUS_SHIFT;
1485
1486 /* Check the DD bit */
1487 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1488 break;
1489 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
1490
1491 nmb = rte_mbuf_raw_alloc(rxq->mp);
1492 if (unlikely(!nmb)) {
1493 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1494 "queue_id=%u", rxq->port_id, rxq->queue_id);
1495 dev = &rte_eth_devices[rxq->port_id];
1496 dev->data->rx_mbuf_alloc_failed++;
1497 break;
1498 }
1499
1500 rxd = *rxdp;
1501 nb_hold++;
1502 rxe = rxq->sw_ring[rx_id];
1503 rx_id++;
1504 if (rx_id == rxq->nb_rx_desc)
1505 rx_id = 0;
1506
1507 /* Prefetch next mbuf */
1508 rte_prefetch0(rxq->sw_ring[rx_id]);
1509
1510 /* When next RX descriptor is on a cache line boundary,
1511 * prefetch the next 4 RX descriptors and next 8 pointers
1512 * to mbufs.
1513 */
1514 if ((rx_id & 0x3) == 0) {
1515 rte_prefetch0(&rx_ring[rx_id]);
1516 rte_prefetch0(rxq->sw_ring[rx_id]);
1517 }
1518
1519 rxm = rxe;
1520 dma_addr =
1521 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1522
1523 /* Set data buffer address and data length of the mbuf */
1524 rxdp->read.hdr_addr = 0;
1525 rxdp->read.pkt_addr = dma_addr;
1526 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1527 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1528 rxm->data_len = rx_packet_len;
1529 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1530
1531 /* If this is the first buffer of the received packet, set the
1532 * pointer to the first mbuf of the packet and initialize its
1533 * context. Otherwise, update the total length and the number
1534 * of segments of the current scattered packet, and update the
1535 * pointer to the last mbuf of the current packet.
1536 */
1537 if (!first_seg) {
1538 first_seg = rxm;
1539 first_seg->nb_segs = 1;
1540 first_seg->pkt_len = rx_packet_len;
1541 } else {
1542 first_seg->pkt_len =
1543 (uint16_t)(first_seg->pkt_len +
1544 rx_packet_len);
1545 first_seg->nb_segs++;
1546 last_seg->next = rxm;
1547 }
1548
1549 /* If this is not the last buffer of the received packet,
1550 * update the pointer to the last mbuf of the current scattered
1551 * packet and continue to parse the RX ring.
1552 */
1553 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1554 last_seg = rxm;
1555 continue;
1556 }
1557
1558 /* This is the last buffer of the received packet. If the CRC
1559 * is not stripped by the hardware:
1560 * - Subtract the CRC length from the total packet length.
1561 * - If the last buffer only contains the whole CRC or a part
1562 * of it, free the mbuf associated to the last buffer. If part
1563 * of the CRC is also contained in the previous mbuf, subtract
1564 * the length of that CRC part from the data length of the
1565 * previous mbuf.
1566 */
1567 rxm->next = NULL;
1568 if (unlikely(rxq->crc_len > 0)) {
1569 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1570 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1571 rte_pktmbuf_free_seg(rxm);
1572 first_seg->nb_segs--;
1573 last_seg->data_len =
1574 (uint16_t)(last_seg->data_len -
1575 (RTE_ETHER_CRC_LEN - rx_packet_len));
1576 last_seg->next = NULL;
1577 } else
1578 rxm->data_len = (uint16_t)(rx_packet_len -
1579 RTE_ETHER_CRC_LEN);
1580 }
1581
1582 first_seg->port = rxq->port_id;
1583 first_seg->ol_flags = 0;
1584 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1585 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1586 first_seg->packet_type =
1587 ptype_tbl[(uint8_t)((qword1 &
1588 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1589
1590 if (pkt_flags & PKT_RX_RSS_HASH)
1591 first_seg->hash.rss =
1592 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1593
1594 if (pkt_flags & PKT_RX_FDIR)
1595 pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
1596
1597 first_seg->ol_flags |= pkt_flags;
1598
1599 /* Prefetch data of first segment, if configured to do so. */
1600 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1601 first_seg->data_off));
1602 rx_pkts[nb_rx++] = first_seg;
1603 first_seg = NULL;
1604 }
1605
1606 /* Record index of the next RX descriptor to probe. */
1607 rxq->rx_tail = rx_id;
1608 rxq->pkt_first_seg = first_seg;
1609 rxq->pkt_last_seg = last_seg;
1610
1611 iavf_update_rx_tail(rxq, nb_hold, rx_id);
1612
1613 return nb_rx;
1614 }
1615
1616 #define IAVF_LOOK_AHEAD 8
1617 static inline int
1618 iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
1619 {
1620 volatile union iavf_rx_flex_desc *rxdp;
1621 struct rte_mbuf **rxep;
1622 struct rte_mbuf *mb;
1623 uint16_t stat_err0;
1624 uint16_t pkt_len;
1625 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1626 int32_t i, j, nb_rx = 0;
1627 uint64_t pkt_flags;
1628 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1629
1630 rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
1631 rxep = &rxq->sw_ring[rxq->rx_tail];
1632
1633 stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1634
1635 /* Make sure there is at least 1 packet to receive */
1636 if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
1637 return 0;
1638
1639 /* Scan LOOK_AHEAD descriptors at a time to determine which
1640 * descriptors reference packets that are ready to be received.
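	 * At most IAVF_RX_MAX_BURST / IAVF_LOOK_AHEAD groups of eight
	 * descriptors are examined per call, and the scan stops early at
	 * the first group that is not fully done.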
1641 */
1642 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1643 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1644 /* Read desc statuses backwards to avoid race condition */
1645 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
1646 s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1647
1648 rte_smp_rmb();
1649
1650 /* Compute how many status bits were set */
1651 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1652 nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
1653
1654 nb_rx += nb_dd;
1655
1656 /* Translate descriptor info to mbuf parameters */
1657 for (j = 0; j < nb_dd; j++) {
1658 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1659 rxq->rx_tail +
1660 i * IAVF_LOOK_AHEAD + j);
1661
1662 mb = rxep[j];
1663 pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1664 IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1665 mb->data_len = pkt_len;
1666 mb->pkt_len = pkt_len;
1667 mb->ol_flags = 0;
1668
1669 mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
1670 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1671 iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
1672 rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1673 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1674 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
1675
1676 mb->ol_flags |= pkt_flags;
1677 }
1678
1679 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1680 rxq->rx_stage[i + j] = rxep[j];
1681
1682 if (nb_dd != IAVF_LOOK_AHEAD)
1683 break;
1684 }
1685
1686 /* Clear software ring entries */
1687 for (i = 0; i < nb_rx; i++)
1688 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1689
1690 return nb_rx;
1691 }
1692
1693 static inline int
1694 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1695 {
1696 volatile union iavf_rx_desc *rxdp;
1697 struct rte_mbuf **rxep;
1698 struct rte_mbuf *mb;
1699 uint16_t pkt_len;
1700 uint64_t qword1;
1701 uint32_t rx_status;
1702 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1703 int32_t i, j, nb_rx = 0;
1704 uint64_t pkt_flags;
1705 const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1706
1707 rxdp = &rxq->rx_ring[rxq->rx_tail];
1708 rxep = &rxq->sw_ring[rxq->rx_tail];
1709
1710 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1711 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1712 IAVF_RXD_QW1_STATUS_SHIFT;
1713
1714 /* Make sure there is at least 1 packet to receive */
1715 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1716 return 0;
1717
1718 /* Scan LOOK_AHEAD descriptors at a time to determine which
1719 * descriptors reference packets that are ready to be received.
1720 */
1721 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1722 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1723 /* Read desc statuses backwards to avoid race condition */
1724 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1725 qword1 = rte_le_to_cpu_64(
1726 rxdp[j].wb.qword1.status_error_len);
1727 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1728 IAVF_RXD_QW1_STATUS_SHIFT;
1729 }
1730
1731 rte_smp_rmb();
1732
1733 /* Compute how many status bits were set */
1734 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1735 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1736
1737 nb_rx += nb_dd;
1738
1739 /* Translate descriptor info to mbuf parameters */
1740 for (j = 0; j < nb_dd; j++) {
1741 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1742 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1743
1744 mb = rxep[j];
1745 qword1 = rte_le_to_cpu_64
1746 (rxdp[j].wb.qword1.status_error_len);
1747 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1748 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1749 mb->data_len = pkt_len;
1750 mb->pkt_len = pkt_len;
1751 mb->ol_flags = 0;
1752 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1753 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1754 mb->packet_type =
1755 ptype_tbl[(uint8_t)((qword1 &
1756 IAVF_RXD_QW1_PTYPE_MASK) >>
1757 IAVF_RXD_QW1_PTYPE_SHIFT)];
1758
1759 if (pkt_flags & PKT_RX_RSS_HASH)
1760 mb->hash.rss = rte_le_to_cpu_32(
1761 rxdp[j].wb.qword0.hi_dword.rss);
1762
1763 if (pkt_flags & PKT_RX_FDIR)
1764 pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
1765
1766 mb->ol_flags |= pkt_flags;
1767 }
1768
1769 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1770 rxq->rx_stage[i + j] = rxep[j];
1771
1772 if (nb_dd != IAVF_LOOK_AHEAD)
1773 break;
1774 }
1775
1776 /* Clear software ring entries */
1777 for (i = 0; i < nb_rx; i++)
1778 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1779
1780 return nb_rx;
1781 }
1782
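/* Copy up to nb_pkts already-scanned mbufs from the staging area
 * (rx_stage) into the caller's array and account for them.
 */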
1783 static inline uint16_t
1784 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1785 struct rte_mbuf **rx_pkts,
1786 uint16_t nb_pkts)
1787 {
1788 uint16_t i;
1789 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1790
1791 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1792
1793 for (i = 0; i < nb_pkts; i++)
1794 rx_pkts[i] = stage[i];
1795
1796 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1797 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1798
1799 return nb_pkts;
1800 }
1801
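/* Refill rx_free_thresh descriptors with mbufs taken from the queue's
 * mempool in a single bulk get, then advance the RX tail register to the
 * last refilled slot (rx_free_trigger).
 */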
1802 static inline int
1803 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1804 {
1805 volatile union iavf_rx_desc *rxdp;
1806 struct rte_mbuf **rxep;
1807 struct rte_mbuf *mb;
1808 uint16_t alloc_idx, i;
1809 uint64_t dma_addr;
1810 int diag;
1811
1812 /* Allocate buffers in bulk */
1813 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1814 (rxq->rx_free_thresh - 1));
1815 rxep = &rxq->sw_ring[alloc_idx];
1816 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1817 rxq->rx_free_thresh);
1818 if (unlikely(diag != 0)) {
1819 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1820 return -ENOMEM;
1821 }
1822
1823 rxdp = &rxq->rx_ring[alloc_idx];
1824 for (i = 0; i < rxq->rx_free_thresh; i++) {
1825 if (likely(i < (rxq->rx_free_thresh - 1)))
1826 /* Prefetch next mbuf */
1827 rte_prefetch0(rxep[i + 1]);
1828
1829 mb = rxep[i];
1830 rte_mbuf_refcnt_set(mb, 1);
1831 mb->next = NULL;
1832 mb->data_off = RTE_PKTMBUF_HEADROOM;
1833 mb->nb_segs = 1;
1834 mb->port = rxq->port_id;
1835 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1836 rxdp[i].read.hdr_addr = 0;
1837 rxdp[i].read.pkt_addr = dma_addr;
1838 }
1839
1840 /* Update rx tail register */
1841 rte_wmb();
1842 IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1843
1844 rxq->rx_free_trigger =
1845 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1846 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1847 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1848
1849 return 0;
1850 }
1851
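/* Bulk-allocation RX burst helper: return staged packets if any are
 * pending, otherwise scan the HW ring, stage the completed mbufs and
 * refill descriptors once rx_tail passes the free trigger.
 */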
1852 static inline uint16_t
1853 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1854 {
1855 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1856 uint16_t nb_rx = 0;
1857
1858 if (!nb_pkts)
1859 return 0;
1860
1861 if (rxq->rx_nb_avail)
1862 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1863
1864 if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
1865 nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
1866 else
1867 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1868 rxq->rx_next_avail = 0;
1869 rxq->rx_nb_avail = nb_rx;
1870 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1871
1872 if (rxq->rx_tail > rxq->rx_free_trigger) {
1873 if (iavf_rx_alloc_bufs(rxq) != 0) {
1874 uint16_t i, j;
1875
1876 /* TODO: count rx_mbuf_alloc_failed here */
1877
1878 rxq->rx_nb_avail = 0;
1879 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1880 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1881 rxq->sw_ring[j] = rxq->rx_stage[i];
1882
1883 return 0;
1884 }
1885 }
1886
1887 if (rxq->rx_tail >= rxq->nb_rx_desc)
1888 rxq->rx_tail = 0;
1889
1890 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1891 rxq->port_id, rxq->queue_id,
1892 rxq->rx_tail, nb_rx);
1893
1894 if (rxq->rx_nb_avail)
1895 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1896
1897 return 0;
1898 }
1899
1900 static uint16_t
1901 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1902 struct rte_mbuf **rx_pkts,
1903 uint16_t nb_pkts)
1904 {
1905 uint16_t nb_rx = 0, n, count;
1906
1907 if (unlikely(nb_pkts == 0))
1908 return 0;
1909
1910 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1911 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1912
1913 while (nb_pkts) {
1914 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
1915 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1916 nb_rx = (uint16_t)(nb_rx + count);
1917 nb_pkts = (uint16_t)(nb_pkts - count);
1918 if (count < n)
1919 break;
1920 }
1921
1922 return nb_rx;
1923 }
1924
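/* Reclaim TX descriptors up to the next rs_thresh boundary once hardware
 * reports the boundary descriptor as done; returns -1 while that
 * descriptor is still in flight.
 */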
1925 static inline int
1926 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
1927 {
1928 struct iavf_tx_entry *sw_ring = txq->sw_ring;
1929 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1930 uint16_t nb_tx_desc = txq->nb_tx_desc;
1931 uint16_t desc_to_clean_to;
1932 uint16_t nb_tx_to_clean;
1933
1934 volatile struct iavf_tx_desc *txd = txq->tx_ring;
1935
1936 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1937 if (desc_to_clean_to >= nb_tx_desc)
1938 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1939
1940 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1941 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1942 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1943 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
1944 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1945 "(port=%d queue=%d)", desc_to_clean_to,
1946 txq->port_id, txq->queue_id);
1947 return -1;
1948 }
1949
1950 if (last_desc_cleaned > desc_to_clean_to)
1951 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1952 desc_to_clean_to);
1953 else
1954 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1955 last_desc_cleaned);
1956
1957 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1958
1959 txq->last_desc_cleaned = desc_to_clean_to;
1960 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
1961
1962 return 0;
1963 }
1964
1965 /* Check if the context descriptor is needed for TX offloading */
1966 static inline uint16_t
1967 iavf_calc_context_desc(uint64_t flags)
1968 {
1969 static uint64_t mask = PKT_TX_TCP_SEG;
1970
1971 return (flags & mask) ? 1 : 0;
1972 }
1973
1974 static inline void
1975 iavf_txd_enable_checksum(uint64_t ol_flags,
1976 uint32_t *td_cmd,
1977 uint32_t *td_offset,
1978 union iavf_tx_offload tx_offload)
1979 {
1980 /* Set MACLEN */
1981 *td_offset |= (tx_offload.l2_len >> 1) <<
1982 IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1983
1984 /* Enable L3 checksum offloads */
1985 if (ol_flags & PKT_TX_IP_CKSUM) {
1986 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
1987 *td_offset |= (tx_offload.l3_len >> 2) <<
1988 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1989 } else if (ol_flags & PKT_TX_IPV4) {
1990 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
1991 *td_offset |= (tx_offload.l3_len >> 2) <<
1992 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1993 } else if (ol_flags & PKT_TX_IPV6) {
1994 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
1995 *td_offset |= (tx_offload.l3_len >> 2) <<
1996 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1997 }
1998
1999 if (ol_flags & PKT_TX_TCP_SEG) {
2000 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2001 *td_offset |= (tx_offload.l4_len >> 2) <<
2002 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2003 return;
2004 }
2005
2006 /* Enable L4 checksum offloads */
2007 switch (ol_flags & PKT_TX_L4_MASK) {
2008 case PKT_TX_TCP_CKSUM:
2009 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2010 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2011 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2012 break;
2013 case PKT_TX_SCTP_CKSUM:
2014 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2015 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2016 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2017 break;
2018 case PKT_TX_UDP_CKSUM:
2019 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2020 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2021 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2022 break;
2023 default:
2024 break;
2025 }
2026 }
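/* For illustration: a plain IPv4/TCP packet with l2_len = 14 and
 * l3_len = 20 programs MACLEN as 14 >> 1 = 7 (2-byte words) and IPLEN as
 * 20 >> 2 = 5 (4-byte dwords), matching the shifts above.
 */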
2027
2028 /* Set the TSO context descriptor.
2029  * Supports IP -> L4 and IP -> IP -> L4.
2030  */
2031 static inline uint64_t
2032 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
2033 {
2034 uint64_t ctx_desc = 0;
2035 uint32_t cd_cmd, hdr_len, cd_tso_len;
2036
2037 if (!tx_offload.l4_len) {
2038 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2039 return ctx_desc;
2040 }
2041
2042 hdr_len = tx_offload.l2_len +
2043 tx_offload.l3_len +
2044 tx_offload.l4_len;
2045
2046 cd_cmd = IAVF_TX_CTX_DESC_TSO;
2047 cd_tso_len = mbuf->pkt_len - hdr_len;
2048 ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
2049 ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2050 ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
2051
2052 return ctx_desc;
2053 }
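/* For illustration: a 9014-byte TSO mbuf with 14 + 20 + 20 = 54 bytes of
 * headers and tso_segsz = 1460 yields cd_tso_len = 8960 and MSS = 1460 in
 * the context descriptor built above.
 */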
2054
2055 /* Construct the TX descriptor's cmd_type_offset_bsz field */
2056 static inline uint64_t
2057 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
2058 uint32_t td_tag)
2059 {
2060 return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
2061 ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
2062 ((uint64_t)td_offset <<
2063 IAVF_TXD_QW1_OFFSET_SHIFT) |
2064 ((uint64_t)size <<
2065 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
2066 ((uint64_t)td_tag <<
2067 IAVF_TXD_QW1_L2TAG1_SHIFT));
2068 }
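/* The resulting little-endian qword carries DTYPE_DATA plus the command
 * flags, the MACLEN/IPLEN/L4LEN offsets, the buffer size and the L2TAG1
 * VLAN tag for one data descriptor.
 */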
2069
2070 /* TX burst function */
2071 uint16_t
2072 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2073 {
2074 volatile struct iavf_tx_desc *txd;
2075 volatile struct iavf_tx_desc *txr;
2076 struct iavf_tx_queue *txq;
2077 struct iavf_tx_entry *sw_ring;
2078 struct iavf_tx_entry *txe, *txn;
2079 struct rte_mbuf *tx_pkt;
2080 struct rte_mbuf *m_seg;
2081 uint16_t tx_id;
2082 uint16_t nb_tx;
2083 uint32_t td_cmd;
2084 uint32_t td_offset;
2085 uint32_t td_tag;
2086 uint64_t ol_flags;
2087 uint16_t nb_used;
2088 uint16_t nb_ctx;
2089 uint16_t tx_last;
2090 uint16_t slen;
2091 uint64_t buf_dma_addr;
2092 union iavf_tx_offload tx_offload = {0};
2093
2094 txq = tx_queue;
2095 sw_ring = txq->sw_ring;
2096 txr = txq->tx_ring;
2097 tx_id = txq->tx_tail;
2098 txe = &sw_ring[tx_id];
2099
2100 /* Check if the descriptor ring needs to be cleaned. */
2101 if (txq->nb_free < txq->free_thresh)
2102 (void)iavf_xmit_cleanup(txq);
2103
2104 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2105 td_cmd = 0;
2106 td_tag = 0;
2107 td_offset = 0;
2108
2109 tx_pkt = *tx_pkts++;
2110 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
2111
2112 ol_flags = tx_pkt->ol_flags;
2113 tx_offload.l2_len = tx_pkt->l2_len;
2114 tx_offload.l3_len = tx_pkt->l3_len;
2115 tx_offload.l4_len = tx_pkt->l4_len;
2116 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2117 /* Calculate the number of context descriptors needed. */
2118 nb_ctx = iavf_calc_context_desc(ol_flags);
2119
2120 		/* The number of descriptors that must be allocated for
2121 		 * a packet equals the number of segments of that packet,
2122 		 * plus one context descriptor if needed.
2123 		 */
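		/* e.g. a 3-segment TSO packet needs 3 data descriptors plus
		 * 1 context descriptor, so nb_used = 4.
		 */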
2124 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2125 tx_last = (uint16_t)(tx_id + nb_used - 1);
2126
2127 /* Circular ring */
2128 if (tx_last >= txq->nb_tx_desc)
2129 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2130
2131 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
2132 " tx_first=%u tx_last=%u",
2133 txq->port_id, txq->queue_id, tx_id, tx_last);
2134
2135 if (nb_used > txq->nb_free) {
2136 if (iavf_xmit_cleanup(txq)) {
2137 if (nb_tx == 0)
2138 return 0;
2139 goto end_of_tx;
2140 }
2141 if (unlikely(nb_used > txq->rs_thresh)) {
2142 while (nb_used > txq->nb_free) {
2143 if (iavf_xmit_cleanup(txq)) {
2144 if (nb_tx == 0)
2145 return 0;
2146 goto end_of_tx;
2147 }
2148 }
2149 }
2150 }
2151
2152 /* Descriptor based VLAN insertion */
2153 if (ol_flags & PKT_TX_VLAN_PKT) {
2154 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2155 td_tag = tx_pkt->vlan_tci;
2156 }
2157
2158 		/* According to the datasheet, bit 2 is reserved and must be
2159 		 * set to 1.
2160 		 */
2161 td_cmd |= 0x04;
2162
2163 /* Enable checksum offloading */
2164 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
2165 iavf_txd_enable_checksum(ol_flags, &td_cmd,
2166 &td_offset, tx_offload);
2167
2168 if (nb_ctx) {
2169 /* Setup TX context descriptor if required */
2170 uint64_t cd_type_cmd_tso_mss =
2171 IAVF_TX_DESC_DTYPE_CONTEXT;
2172 volatile struct iavf_tx_context_desc *ctx_txd =
2173 (volatile struct iavf_tx_context_desc *)
2174 &txr[tx_id];
2175
2176 txn = &sw_ring[txe->next_id];
2177 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2178 if (txe->mbuf) {
2179 rte_pktmbuf_free_seg(txe->mbuf);
2180 txe->mbuf = NULL;
2181 }
2182
2183 /* TSO enabled */
2184 if (ol_flags & PKT_TX_TCP_SEG)
2185 cd_type_cmd_tso_mss |=
2186 iavf_set_tso_ctx(tx_pkt, tx_offload);
2187
2188 ctx_txd->type_cmd_tso_mss =
2189 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2190
2191 IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
2192 txe->last_id = tx_last;
2193 tx_id = txe->next_id;
2194 txe = txn;
2195 }
2196
2197 m_seg = tx_pkt;
2198 do {
2199 txd = &txr[tx_id];
2200 txn = &sw_ring[txe->next_id];
2201
2202 if (txe->mbuf)
2203 rte_pktmbuf_free_seg(txe->mbuf);
2204 txe->mbuf = m_seg;
2205
2206 /* Setup TX Descriptor */
2207 slen = m_seg->data_len;
2208 buf_dma_addr = rte_mbuf_data_iova(m_seg);
2209 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
2210 txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
2211 td_offset,
2212 slen,
2213 td_tag);
2214
2215 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2216 txe->last_id = tx_last;
2217 tx_id = txe->next_id;
2218 txe = txn;
2219 m_seg = m_seg->next;
2220 } while (m_seg);
2221
2222 /* The last packet data descriptor needs End Of Packet (EOP) */
2223 td_cmd |= IAVF_TX_DESC_CMD_EOP;
2224 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
2225 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
2226
2227 if (txq->nb_used >= txq->rs_thresh) {
2228 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
2229 "%4u (port=%d queue=%d)",
2230 tx_last, txq->port_id, txq->queue_id);
2231
2232 td_cmd |= IAVF_TX_DESC_CMD_RS;
2233
2234 /* Update txq RS bit counters */
2235 txq->nb_used = 0;
2236 }
2237
2238 txd->cmd_type_offset_bsz |=
2239 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2240 IAVF_TXD_QW1_CMD_SHIFT);
2241 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
2242 }
2243
2244 end_of_tx:
2245 rte_wmb();
2246
2247 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
2248 txq->port_id, txq->queue_id, tx_id, nb_tx);
2249
2250 IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
2251 txq->tx_tail = tx_id;
2252
2253 return nb_tx;
2254 }
2255
2256 /* TX prep function */
2257 uint16_t
2258 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
2259 uint16_t nb_pkts)
2260 {
2261 int i, ret;
2262 uint64_t ol_flags;
2263 struct rte_mbuf *m;
2264
2265 for (i = 0; i < nb_pkts; i++) {
2266 m = tx_pkts[i];
2267 ol_flags = m->ol_flags;
2268
2269 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
2270 if (!(ol_flags & PKT_TX_TCP_SEG)) {
2271 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
2272 rte_errno = EINVAL;
2273 return i;
2274 }
2275 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
2276 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
2277 			/* An MSS outside this range is considered malicious */
2278 rte_errno = EINVAL;
2279 return i;
2280 }
2281
2282 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
2283 rte_errno = ENOTSUP;
2284 return i;
2285 }
2286
2287 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2288 ret = rte_validate_tx_offload(m);
2289 if (ret != 0) {
2290 rte_errno = -ret;
2291 return i;
2292 }
2293 #endif
2294 ret = rte_net_intel_cksum_prepare(m);
2295 if (ret != 0) {
2296 rte_errno = -ret;
2297 return i;
2298 }
2299 }
2300
2301 return i;
2302 }
2303
2304 /* Choose the Rx burst function */
2305 void
2306 iavf_set_rx_function(struct rte_eth_dev *dev)
2307 {
2308 struct iavf_adapter *adapter =
2309 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2310 struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2311
2312 #ifdef RTE_ARCH_X86
2313 struct iavf_rx_queue *rxq;
2314 int i;
2315 bool use_avx2 = false;
2316 #ifdef CC_AVX512_SUPPORT
2317 bool use_avx512 = false;
2318 #endif
2319
2320 if (!iavf_rx_vec_dev_check(dev) &&
2321 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2322 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2323 rxq = dev->data->rx_queues[i];
2324 (void)iavf_rxq_vec_setup(rxq);
2325 }
2326
2327 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2328 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2329 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2330 use_avx2 = true;
2331 #ifdef CC_AVX512_SUPPORT
2332 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2333 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2334 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2335 use_avx512 = true;
2336 #endif
2337
2338 if (dev->data->scattered_rx) {
2339 PMD_DRV_LOG(DEBUG,
2340 "Using %sVector Scattered Rx (port %d).",
2341 use_avx2 ? "avx2 " : "",
2342 dev->data->port_id);
2343 if (vf->vf_res->vf_cap_flags &
2344 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2345 dev->rx_pkt_burst = use_avx2 ?
2346 iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
2347 iavf_recv_scattered_pkts_vec_flex_rxd;
2348 #ifdef CC_AVX512_SUPPORT
2349 if (use_avx512)
2350 dev->rx_pkt_burst =
2351 iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
2352 #endif
2353 } else {
2354 dev->rx_pkt_burst = use_avx2 ?
2355 iavf_recv_scattered_pkts_vec_avx2 :
2356 iavf_recv_scattered_pkts_vec;
2357 #ifdef CC_AVX512_SUPPORT
2358 if (use_avx512)
2359 dev->rx_pkt_burst =
2360 iavf_recv_scattered_pkts_vec_avx512;
2361 #endif
2362 }
2363 } else {
2364 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2365 use_avx2 ? "avx2 " : "",
2366 dev->data->port_id);
2367 if (vf->vf_res->vf_cap_flags &
2368 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
2369 dev->rx_pkt_burst = use_avx2 ?
2370 iavf_recv_pkts_vec_avx2_flex_rxd :
2371 iavf_recv_pkts_vec_flex_rxd;
2372 #ifdef CC_AVX512_SUPPORT
2373 if (use_avx512)
2374 dev->rx_pkt_burst =
2375 iavf_recv_pkts_vec_avx512_flex_rxd;
2376 #endif
2377 } else {
2378 dev->rx_pkt_burst = use_avx2 ?
2379 iavf_recv_pkts_vec_avx2 :
2380 iavf_recv_pkts_vec;
2381 #ifdef CC_AVX512_SUPPORT
2382 if (use_avx512)
2383 dev->rx_pkt_burst =
2384 iavf_recv_pkts_vec_avx512;
2385 #endif
2386 }
2387 }
2388
2389 return;
2390 }
2391 #endif
2392
2393 if (dev->data->scattered_rx) {
2394 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
2395 dev->data->port_id);
2396 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2397 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
2398 else
2399 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
2400 } else if (adapter->rx_bulk_alloc_allowed) {
2401 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
2402 dev->data->port_id);
2403 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
2404 } else {
2405 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
2406 dev->data->port_id);
2407 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
2408 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
2409 else
2410 dev->rx_pkt_burst = iavf_recv_pkts;
2411 }
2412 }
2413
2414 /* Choose the Tx burst function */
2415 void
2416 iavf_set_tx_function(struct rte_eth_dev *dev)
2417 {
2418 #ifdef RTE_ARCH_X86
2419 struct iavf_tx_queue *txq;
2420 int i;
2421 bool use_avx2 = false;
2422 #ifdef CC_AVX512_SUPPORT
2423 bool use_avx512 = false;
2424 #endif
2425
2426 if (!iavf_tx_vec_dev_check(dev) &&
2427 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
2428 if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2429 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
2430 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
2431 use_avx2 = true;
2432 #ifdef CC_AVX512_SUPPORT
2433 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
2434 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
2435 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2436 use_avx512 = true;
2437 #endif
2438
2439 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
2440 use_avx2 ? "avx2 " : "",
2441 dev->data->port_id);
2442 dev->tx_pkt_burst = use_avx2 ?
2443 iavf_xmit_pkts_vec_avx2 :
2444 iavf_xmit_pkts_vec;
2445 #ifdef CC_AVX512_SUPPORT
2446 if (use_avx512)
2447 dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
2448 #endif
2449 dev->tx_pkt_prepare = NULL;
2450
2451 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2452 txq = dev->data->tx_queues[i];
2453 if (!txq)
2454 continue;
2455 #ifdef CC_AVX512_SUPPORT
2456 if (use_avx512)
2457 iavf_txq_vec_setup_avx512(txq);
2458 else
2459 iavf_txq_vec_setup(txq);
2460 #else
2461 iavf_txq_vec_setup(txq);
2462 #endif
2463 }
2464
2465 return;
2466 }
2467 #endif
2468
2469 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
2470 dev->data->port_id);
2471 dev->tx_pkt_burst = iavf_xmit_pkts;
2472 dev->tx_pkt_prepare = iavf_prep_pkts;
2473 }
2474
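/* Free the mbufs attached to already transmitted descriptors, up to
 * free_cnt packets (0 means up to the full ring size), cleaning the
 * descriptor ring as it goes.
 */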
2475 static int
2476 iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
2477 uint32_t free_cnt)
2478 {
2479 struct iavf_tx_entry *swr_ring = txq->sw_ring;
2480 uint16_t i, tx_last, tx_id;
2481 uint16_t nb_tx_free_last;
2482 uint16_t nb_tx_to_clean;
2483 uint32_t pkt_cnt;
2484
2485 	/* Start freeing mbufs from the descriptor following tx_tail */
2486 tx_last = txq->tx_tail;
2487 tx_id = swr_ring[tx_last].next_id;
2488
2489 if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
2490 return 0;
2491
2492 nb_tx_to_clean = txq->nb_free;
2493 nb_tx_free_last = txq->nb_free;
2494 if (!free_cnt)
2495 free_cnt = txq->nb_tx_desc;
2496
2497 	/* Loop through swr_ring to count the number of
2498 	 * freeable mbufs and packets.
2499 	 */
2500 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2501 for (i = 0; i < nb_tx_to_clean &&
2502 pkt_cnt < free_cnt &&
2503 tx_id != tx_last; i++) {
2504 if (swr_ring[tx_id].mbuf != NULL) {
2505 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2506 swr_ring[tx_id].mbuf = NULL;
2507
2508 /*
2509 * last segment in the packet,
2510 * increment packet count
2511 */
2512 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2513 }
2514
2515 tx_id = swr_ring[tx_id].next_id;
2516 }
2517
2518 if (txq->rs_thresh > txq->nb_tx_desc -
2519 txq->nb_free || tx_id == tx_last)
2520 break;
2521
2522 if (pkt_cnt < free_cnt) {
2523 if (iavf_xmit_cleanup(txq))
2524 break;
2525
2526 nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
2527 nb_tx_free_last = txq->nb_free;
2528 }
2529 }
2530
2531 return (int)pkt_cnt;
2532 }
2533
2534 int
2535 iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
2536 {
2537 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
2538
2539 return iavf_tx_done_cleanup_full(q, free_cnt);
2540 }
2541
2542 void
2543 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2544 struct rte_eth_rxq_info *qinfo)
2545 {
2546 struct iavf_rx_queue *rxq;
2547
2548 rxq = dev->data->rx_queues[queue_id];
2549
2550 qinfo->mp = rxq->mp;
2551 qinfo->scattered_rx = dev->data->scattered_rx;
2552 qinfo->nb_desc = rxq->nb_rx_desc;
2553
2554 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2555 qinfo->conf.rx_drop_en = true;
2556 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2557 }
2558
2559 void
2560 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2561 struct rte_eth_txq_info *qinfo)
2562 {
2563 struct iavf_tx_queue *txq;
2564
2565 txq = dev->data->tx_queues[queue_id];
2566
2567 qinfo->nb_desc = txq->nb_tx_desc;
2568
2569 qinfo->conf.tx_free_thresh = txq->free_thresh;
2570 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
2571 qinfo->conf.offloads = txq->offloads;
2572 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2573 }
2574
2575 /* Get the number of used descriptors of an Rx queue */
2576 uint32_t
2577 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
2578 {
2579 #define IAVF_RXQ_SCAN_INTERVAL 4
2580 volatile union iavf_rx_desc *rxdp;
2581 struct iavf_rx_queue *rxq;
2582 uint16_t desc = 0;
2583
2584 rxq = dev->data->rx_queues[queue_id];
2585 rxdp = &rxq->rx_ring[rxq->rx_tail];
2586
2587 while ((desc < rxq->nb_rx_desc) &&
2588 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
2589 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
2590 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
2591 		/* Check the DD bit of every IAVF_RXQ_SCAN_INTERVAL-th (4th)
2592 		 * Rx descriptor, to avoid checking too frequently and
2593 		 * degrading performance too much.
2594 		 */
2595 desc += IAVF_RXQ_SCAN_INTERVAL;
2596 rxdp += IAVF_RXQ_SCAN_INTERVAL;
2597 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2598 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2599 desc - rxq->nb_rx_desc]);
2600 }
2601
2602 return desc;
2603 }
2604
2605 int
2606 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
2607 {
2608 struct iavf_rx_queue *rxq = rx_queue;
2609 volatile uint64_t *status;
2610 uint64_t mask;
2611 uint32_t desc;
2612
2613 if (unlikely(offset >= rxq->nb_rx_desc))
2614 return -EINVAL;
2615
2616 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2617 return RTE_ETH_RX_DESC_UNAVAIL;
2618
2619 desc = rxq->rx_tail + offset;
2620 if (desc >= rxq->nb_rx_desc)
2621 desc -= rxq->nb_rx_desc;
2622
2623 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
2624 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
2625 << IAVF_RXD_QW1_STATUS_SHIFT);
2626 if (*status & mask)
2627 return RTE_ETH_RX_DESC_DONE;
2628
2629 return RTE_ETH_RX_DESC_AVAIL;
2630 }
2631
2632 int
2633 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
2634 {
2635 struct iavf_tx_queue *txq = tx_queue;
2636 volatile uint64_t *status;
2637 uint64_t mask, expect;
2638 uint32_t desc;
2639
2640 if (unlikely(offset >= txq->nb_tx_desc))
2641 return -EINVAL;
2642
2643 desc = txq->tx_tail + offset;
2644 	/* Advance to the next descriptor that carries the RS bit */
2645 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
2646 txq->rs_thresh;
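	/* For illustration: with rs_thresh = 32 and tx_tail + offset = 70,
	 * the status is sampled at descriptor 96, the next RS boundary.
	 */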
2647 if (desc >= txq->nb_tx_desc) {
2648 desc -= txq->nb_tx_desc;
2649 if (desc >= txq->nb_tx_desc)
2650 desc -= txq->nb_tx_desc;
2651 }
2652
2653 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2654 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
2655 expect = rte_cpu_to_le_64(
2656 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
2657 if ((*status & mask) == expect)
2658 return RTE_ETH_TX_DESC_DONE;
2659
2660 return RTE_ETH_TX_DESC_FULL;
2661 }
2662
2663 const uint32_t *
2664 iavf_get_default_ptype_table(void)
2665 {
2666 static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
2667 __rte_cache_aligned = {
2668 /* L2 types */
2669 /* [0] reserved */
2670 [1] = RTE_PTYPE_L2_ETHER,
2671 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
2672 /* [3] - [5] reserved */
2673 [6] = RTE_PTYPE_L2_ETHER_LLDP,
2674 /* [7] - [10] reserved */
2675 [11] = RTE_PTYPE_L2_ETHER_ARP,
2676 /* [12] - [21] reserved */
2677
2678 /* Non tunneled IPv4 */
2679 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2680 RTE_PTYPE_L4_FRAG,
2681 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2682 RTE_PTYPE_L4_NONFRAG,
2683 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2684 RTE_PTYPE_L4_UDP,
2685 /* [25] reserved */
2686 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2687 RTE_PTYPE_L4_TCP,
2688 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2689 RTE_PTYPE_L4_SCTP,
2690 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2691 RTE_PTYPE_L4_ICMP,
2692
2693 /* IPv4 --> IPv4 */
2694 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2695 RTE_PTYPE_TUNNEL_IP |
2696 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2697 RTE_PTYPE_INNER_L4_FRAG,
2698 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2699 RTE_PTYPE_TUNNEL_IP |
2700 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2701 RTE_PTYPE_INNER_L4_NONFRAG,
2702 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2703 RTE_PTYPE_TUNNEL_IP |
2704 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2705 RTE_PTYPE_INNER_L4_UDP,
2706 /* [32] reserved */
2707 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2708 RTE_PTYPE_TUNNEL_IP |
2709 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2710 RTE_PTYPE_INNER_L4_TCP,
2711 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2712 RTE_PTYPE_TUNNEL_IP |
2713 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2714 RTE_PTYPE_INNER_L4_SCTP,
2715 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2716 RTE_PTYPE_TUNNEL_IP |
2717 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2718 RTE_PTYPE_INNER_L4_ICMP,
2719
2720 /* IPv4 --> IPv6 */
2721 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2722 RTE_PTYPE_TUNNEL_IP |
2723 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2724 RTE_PTYPE_INNER_L4_FRAG,
2725 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2726 RTE_PTYPE_TUNNEL_IP |
2727 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2728 RTE_PTYPE_INNER_L4_NONFRAG,
2729 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2730 RTE_PTYPE_TUNNEL_IP |
2731 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2732 RTE_PTYPE_INNER_L4_UDP,
2733 /* [39] reserved */
2734 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2735 RTE_PTYPE_TUNNEL_IP |
2736 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2737 RTE_PTYPE_INNER_L4_TCP,
2738 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2739 RTE_PTYPE_TUNNEL_IP |
2740 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2741 RTE_PTYPE_INNER_L4_SCTP,
2742 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2743 RTE_PTYPE_TUNNEL_IP |
2744 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2745 RTE_PTYPE_INNER_L4_ICMP,
2746
2747 /* IPv4 --> GRE/Teredo/VXLAN */
2748 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2749 RTE_PTYPE_TUNNEL_GRENAT,
2750
2751 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
2752 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2753 RTE_PTYPE_TUNNEL_GRENAT |
2754 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2755 RTE_PTYPE_INNER_L4_FRAG,
2756 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2757 RTE_PTYPE_TUNNEL_GRENAT |
2758 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2759 RTE_PTYPE_INNER_L4_NONFRAG,
2760 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2761 RTE_PTYPE_TUNNEL_GRENAT |
2762 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2763 RTE_PTYPE_INNER_L4_UDP,
2764 /* [47] reserved */
2765 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2766 RTE_PTYPE_TUNNEL_GRENAT |
2767 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2768 RTE_PTYPE_INNER_L4_TCP,
2769 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2770 RTE_PTYPE_TUNNEL_GRENAT |
2771 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2772 RTE_PTYPE_INNER_L4_SCTP,
2773 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2774 RTE_PTYPE_TUNNEL_GRENAT |
2775 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2776 RTE_PTYPE_INNER_L4_ICMP,
2777
2778 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
2779 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2780 RTE_PTYPE_TUNNEL_GRENAT |
2781 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2782 RTE_PTYPE_INNER_L4_FRAG,
2783 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2784 RTE_PTYPE_TUNNEL_GRENAT |
2785 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2786 RTE_PTYPE_INNER_L4_NONFRAG,
2787 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2788 RTE_PTYPE_TUNNEL_GRENAT |
2789 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2790 RTE_PTYPE_INNER_L4_UDP,
2791 /* [54] reserved */
2792 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2793 RTE_PTYPE_TUNNEL_GRENAT |
2794 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2795 RTE_PTYPE_INNER_L4_TCP,
2796 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2797 RTE_PTYPE_TUNNEL_GRENAT |
2798 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2799 RTE_PTYPE_INNER_L4_SCTP,
2800 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2801 RTE_PTYPE_TUNNEL_GRENAT |
2802 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2803 RTE_PTYPE_INNER_L4_ICMP,
2804
2805 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
2806 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2807 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2808
2809 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2810 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2811 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2812 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2813 RTE_PTYPE_INNER_L4_FRAG,
2814 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2815 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2816 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2817 RTE_PTYPE_INNER_L4_NONFRAG,
2818 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2819 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2820 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2821 RTE_PTYPE_INNER_L4_UDP,
2822 /* [62] reserved */
2823 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2824 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2825 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2826 RTE_PTYPE_INNER_L4_TCP,
2827 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2828 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2829 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2830 RTE_PTYPE_INNER_L4_SCTP,
2831 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2832 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2833 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2834 RTE_PTYPE_INNER_L4_ICMP,
2835
2836 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
2837 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2838 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2839 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2840 RTE_PTYPE_INNER_L4_FRAG,
2841 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2842 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2843 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2844 RTE_PTYPE_INNER_L4_NONFRAG,
2845 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2846 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2847 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2848 RTE_PTYPE_INNER_L4_UDP,
2849 /* [69] reserved */
2850 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2851 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2852 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2853 RTE_PTYPE_INNER_L4_TCP,
2854 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2855 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2856 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2857 RTE_PTYPE_INNER_L4_SCTP,
2858 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
2859 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2860 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2861 RTE_PTYPE_INNER_L4_ICMP,
2862 /* [73] - [87] reserved */
2863
2864 /* Non tunneled IPv6 */
2865 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2866 RTE_PTYPE_L4_FRAG,
2867 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2868 RTE_PTYPE_L4_NONFRAG,
2869 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2870 RTE_PTYPE_L4_UDP,
2871 /* [91] reserved */
2872 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2873 RTE_PTYPE_L4_TCP,
2874 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2875 RTE_PTYPE_L4_SCTP,
2876 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2877 RTE_PTYPE_L4_ICMP,
2878
2879 /* IPv6 --> IPv4 */
2880 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2881 RTE_PTYPE_TUNNEL_IP |
2882 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2883 RTE_PTYPE_INNER_L4_FRAG,
2884 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2885 RTE_PTYPE_TUNNEL_IP |
2886 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2887 RTE_PTYPE_INNER_L4_NONFRAG,
2888 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2889 RTE_PTYPE_TUNNEL_IP |
2890 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2891 RTE_PTYPE_INNER_L4_UDP,
2892 /* [98] reserved */
2893 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2894 RTE_PTYPE_TUNNEL_IP |
2895 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2896 RTE_PTYPE_INNER_L4_TCP,
2897 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2898 RTE_PTYPE_TUNNEL_IP |
2899 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2900 RTE_PTYPE_INNER_L4_SCTP,
2901 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2902 RTE_PTYPE_TUNNEL_IP |
2903 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2904 RTE_PTYPE_INNER_L4_ICMP,
2905
2906 /* IPv6 --> IPv6 */
2907 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2908 RTE_PTYPE_TUNNEL_IP |
2909 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2910 RTE_PTYPE_INNER_L4_FRAG,
2911 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2912 RTE_PTYPE_TUNNEL_IP |
2913 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2914 RTE_PTYPE_INNER_L4_NONFRAG,
2915 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2916 RTE_PTYPE_TUNNEL_IP |
2917 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2918 RTE_PTYPE_INNER_L4_UDP,
2919 /* [105] reserved */
2920 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2921 RTE_PTYPE_TUNNEL_IP |
2922 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2923 RTE_PTYPE_INNER_L4_TCP,
2924 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2925 RTE_PTYPE_TUNNEL_IP |
2926 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2927 RTE_PTYPE_INNER_L4_SCTP,
2928 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2929 RTE_PTYPE_TUNNEL_IP |
2930 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2931 RTE_PTYPE_INNER_L4_ICMP,
2932
2933 /* IPv6 --> GRE/Teredo/VXLAN */
2934 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2935 RTE_PTYPE_TUNNEL_GRENAT,
2936
2937 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
2938 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2939 RTE_PTYPE_TUNNEL_GRENAT |
2940 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2941 RTE_PTYPE_INNER_L4_FRAG,
2942 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2943 RTE_PTYPE_TUNNEL_GRENAT |
2944 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2945 RTE_PTYPE_INNER_L4_NONFRAG,
2946 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2947 RTE_PTYPE_TUNNEL_GRENAT |
2948 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2949 RTE_PTYPE_INNER_L4_UDP,
2950 /* [113] reserved */
2951 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2952 RTE_PTYPE_TUNNEL_GRENAT |
2953 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2954 RTE_PTYPE_INNER_L4_TCP,
2955 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2956 RTE_PTYPE_TUNNEL_GRENAT |
2957 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2958 RTE_PTYPE_INNER_L4_SCTP,
2959 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2960 RTE_PTYPE_TUNNEL_GRENAT |
2961 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2962 RTE_PTYPE_INNER_L4_ICMP,
2963
2964 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
2965 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2966 RTE_PTYPE_TUNNEL_GRENAT |
2967 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2968 RTE_PTYPE_INNER_L4_FRAG,
2969 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2970 RTE_PTYPE_TUNNEL_GRENAT |
2971 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2972 RTE_PTYPE_INNER_L4_NONFRAG,
2973 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2974 RTE_PTYPE_TUNNEL_GRENAT |
2975 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2976 RTE_PTYPE_INNER_L4_UDP,
2977 /* [120] reserved */
2978 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2979 RTE_PTYPE_TUNNEL_GRENAT |
2980 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2981 RTE_PTYPE_INNER_L4_TCP,
2982 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2983 RTE_PTYPE_TUNNEL_GRENAT |
2984 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2985 RTE_PTYPE_INNER_L4_SCTP,
2986 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2987 RTE_PTYPE_TUNNEL_GRENAT |
2988 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
2989 RTE_PTYPE_INNER_L4_ICMP,
2990
2991 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
2992 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2993 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
2994
2995 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
2996 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
2997 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
2998 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
2999 RTE_PTYPE_INNER_L4_FRAG,
3000 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3001 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3002 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3003 RTE_PTYPE_INNER_L4_NONFRAG,
3004 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3005 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3006 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3007 RTE_PTYPE_INNER_L4_UDP,
3008 /* [128] reserved */
3009 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3010 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3011 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3012 RTE_PTYPE_INNER_L4_TCP,
3013 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3014 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3015 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3016 RTE_PTYPE_INNER_L4_SCTP,
3017 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3018 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3019 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3020 RTE_PTYPE_INNER_L4_ICMP,
3021
3022 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3023 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3024 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3025 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3026 RTE_PTYPE_INNER_L4_FRAG,
3027 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3028 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3029 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3030 RTE_PTYPE_INNER_L4_NONFRAG,
3031 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3032 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3033 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3034 RTE_PTYPE_INNER_L4_UDP,
3035 /* [135] reserved */
3036 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3037 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3038 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3039 RTE_PTYPE_INNER_L4_TCP,
3040 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3041 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3042 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3043 RTE_PTYPE_INNER_L4_SCTP,
3044 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3045 RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3046 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3047 RTE_PTYPE_INNER_L4_ICMP,
3048 /* [139] - [299] reserved */
3049
3050 /* PPPoE */
3051 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3052 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3053
3054 /* PPPoE --> IPv4 */
3055 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3056 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3057 RTE_PTYPE_L4_FRAG,
3058 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3059 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3060 RTE_PTYPE_L4_NONFRAG,
3061 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3062 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3063 RTE_PTYPE_L4_UDP,
3064 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3065 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3066 RTE_PTYPE_L4_TCP,
3067 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3068 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3069 RTE_PTYPE_L4_SCTP,
3070 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3071 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3072 RTE_PTYPE_L4_ICMP,
3073
3074 /* PPPoE --> IPv6 */
3075 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3076 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3077 RTE_PTYPE_L4_FRAG,
3078 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3079 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3080 RTE_PTYPE_L4_NONFRAG,
3081 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3082 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3083 RTE_PTYPE_L4_UDP,
3084 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3085 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3086 RTE_PTYPE_L4_TCP,
3087 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3088 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3089 RTE_PTYPE_L4_SCTP,
3090 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3091 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3092 RTE_PTYPE_L4_ICMP,
3093 /* [314] - [324] reserved */
3094
3095 /* IPv4/IPv6 --> GTPC/GTPU */
3096 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3097 RTE_PTYPE_TUNNEL_GTPC,
3098 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3099 RTE_PTYPE_TUNNEL_GTPC,
3100 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3101 RTE_PTYPE_TUNNEL_GTPC,
3102 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3103 RTE_PTYPE_TUNNEL_GTPC,
3104 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3105 RTE_PTYPE_TUNNEL_GTPU,
3106 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3107 RTE_PTYPE_TUNNEL_GTPU,
3108
3109 /* IPv4 --> GTPU --> IPv4 */
3110 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3111 RTE_PTYPE_TUNNEL_GTPU |
3112 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3113 RTE_PTYPE_INNER_L4_FRAG,
3114 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3115 RTE_PTYPE_TUNNEL_GTPU |
3116 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3117 RTE_PTYPE_INNER_L4_NONFRAG,
3118 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3119 RTE_PTYPE_TUNNEL_GTPU |
3120 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3121 RTE_PTYPE_INNER_L4_UDP,
3122 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3123 RTE_PTYPE_TUNNEL_GTPU |
3124 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3125 RTE_PTYPE_INNER_L4_TCP,
3126 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3127 RTE_PTYPE_TUNNEL_GTPU |
3128 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3129 RTE_PTYPE_INNER_L4_ICMP,
3130
3131 /* IPv6 --> GTPU --> IPv4 */
3132 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3133 RTE_PTYPE_TUNNEL_GTPU |
3134 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3135 RTE_PTYPE_INNER_L4_FRAG,
3136 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3137 RTE_PTYPE_TUNNEL_GTPU |
3138 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3139 RTE_PTYPE_INNER_L4_NONFRAG,
3140 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3141 RTE_PTYPE_TUNNEL_GTPU |
3142 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3143 RTE_PTYPE_INNER_L4_UDP,
3144 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3145 RTE_PTYPE_TUNNEL_GTPU |
3146 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3147 RTE_PTYPE_INNER_L4_TCP,
3148 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3149 RTE_PTYPE_TUNNEL_GTPU |
3150 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3151 RTE_PTYPE_INNER_L4_ICMP,
3152
3153 /* IPv4 --> GTPU --> IPv6 */
3154 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3155 RTE_PTYPE_TUNNEL_GTPU |
3156 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3157 RTE_PTYPE_INNER_L4_FRAG,
3158 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3159 RTE_PTYPE_TUNNEL_GTPU |
3160 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3161 RTE_PTYPE_INNER_L4_NONFRAG,
3162 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3163 RTE_PTYPE_TUNNEL_GTPU |
3164 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3165 RTE_PTYPE_INNER_L4_UDP,
3166 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3167 RTE_PTYPE_TUNNEL_GTPU |
3168 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3169 RTE_PTYPE_INNER_L4_TCP,
3170 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3171 RTE_PTYPE_TUNNEL_GTPU |
3172 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3173 RTE_PTYPE_INNER_L4_ICMP,
3174
3175 /* IPv6 --> GTPU --> IPv6 */
3176 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3177 RTE_PTYPE_TUNNEL_GTPU |
3178 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3179 RTE_PTYPE_INNER_L4_FRAG,
3180 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3181 RTE_PTYPE_TUNNEL_GTPU |
3182 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3183 RTE_PTYPE_INNER_L4_NONFRAG,
3184 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3185 RTE_PTYPE_TUNNEL_GTPU |
3186 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3187 RTE_PTYPE_INNER_L4_UDP,
3188 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3189 RTE_PTYPE_TUNNEL_GTPU |
3190 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3191 RTE_PTYPE_INNER_L4_TCP,
3192 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3193 RTE_PTYPE_TUNNEL_GTPU |
3194 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3195 RTE_PTYPE_INNER_L4_ICMP,
3196 /* All others reserved */
3197 };
3198
3199 return ptype_tbl;
3200 }
3201