/* xref: drivers/net/bnxt/bnxt_rxr.h (revision daa02b5c) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#ifndef _BNXT_RXR_H_
#define _BNXT_RXR_H_
#include "hsi_struct_def_dpdk.h"

/*
 * Aggregation ID of a TPA start completion.  Pre-P5 chips carry it as a
 * bit-field inside the little-endian 16-bit agg_id word; P5 chips (the
 * "_TH" variant) use the whole word.  Selected by bnxt_tpa_start_agg_id().
 */
#define BNXT_TPA_START_AGG_ID_PRE_TH(cmp) \
	((rte_le_to_cpu_16((cmp)->agg_id) & RX_TPA_START_CMPL_AGG_ID_MASK) >> \
	 RX_TPA_START_CMPL_AGG_ID_SFT)

#define BNXT_TPA_START_AGG_ID_TH(cmp) \
	rte_le_to_cpu_16((cmp)->agg_id)

bnxt_tpa_start_agg_id(struct bnxt * bp,struct rx_tpa_start_cmpl * cmp)17 static inline uint16_t bnxt_tpa_start_agg_id(struct bnxt *bp,
18 					     struct rx_tpa_start_cmpl *cmp)
19 {
20 	if (BNXT_CHIP_P5(bp))
21 		return BNXT_TPA_START_AGG_ID_TH(cmp);
22 	else
23 		return BNXT_TPA_START_AGG_ID_PRE_TH(cmp);
24 }
25 
/* Aggregation buffer count from a TPA end completion (pre-P5 bit-field). */
#define BNXT_TPA_END_AGG_BUFS(cmp) \
	(((cmp)->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) \
	 >> RX_TPA_END_CMPL_AGG_BUFS_SFT)

/* Aggregation buffer count from a TPA end completion (P5: full field). */
#define BNXT_TPA_END_AGG_BUFS_TH(cmp) \
	((cmp)->tpa_agg_bufs)

/* Aggregation ID from a TPA end completion (pre-P5 bit-field). */
#define BNXT_TPA_END_AGG_ID(cmp) \
	(((cmp)->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >> \
	 RX_TPA_END_CMPL_AGG_ID_SFT)

/* Aggregation ID from a TPA end completion (P5: whole 16-bit LE word). */
#define BNXT_TPA_END_AGG_ID_TH(cmp) \
	rte_le_to_cpu_16((cmp)->agg_id)

/* Aggregation buffer count from an ordinary L2 Rx completion. */
#define BNXT_RX_L2_AGG_BUFS(cmp) \
	(((cmp)->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >> \
		RX_PKT_CMPL_AGG_BUFS_SFT)

/* Number of descriptors to process per inner loop in vector mode. */
#define BNXT_RX_DESCS_PER_LOOP_VEC128	4U /* SSE, Neon */
#define BNXT_RX_DESCS_PER_LOOP_VEC256	8U /* AVX2 */

/* Number of extra Rx mbuf ring entries to allocate for vector mode. */
#define BNXT_RX_EXTRA_MBUF_ENTRIES \
	RTE_MAX(BNXT_RX_DESCS_PER_LOOP_VEC128, BNXT_RX_DESCS_PER_LOOP_VEC256)

/* Dimensions of the per-ring ol_flags lookup tables (bnxt_rx_ring_info). */
#define BNXT_OL_FLAGS_TBL_DIM	64
#define BNXT_OL_FLAGS_ERR_TBL_DIM 32

/* State tracked for one in-progress TPA aggregation. */
struct bnxt_tpa_info {
	struct rte_mbuf			*mbuf;	/* mbuf being aggregated */
	uint16_t			len;
	uint32_t			agg_count; /* entries used in agg_arr */
	struct rx_tpa_v2_abuf_cmpl	agg_arr[TPA_MAX_NUM_SEGS];

	/* Values captured from the TPA completion; each is meaningful only
	 * when its corresponding *_valid bit below is set.
	 */
	uint32_t                        rss_hash;
	uint32_t                        vlan;
	uint16_t                        cfa_code;
	uint8_t                         hash_valid:1;
	uint8_t                         vlan_valid:1;
	uint8_t                         cfa_code_valid:1;
	uint8_t                         l4_csum_valid:1;
};

/*
 * Per-queue Rx ring state: indices, doorbells, hardware descriptor rings
 * and software mbuf rings for both the main Rx ring and the aggregation
 * ("ag") ring, plus TPA bookkeeping and ol_flags lookup tables.
 */
struct bnxt_rx_ring_info {
	uint16_t		rx_raw_prod;	/* Rx ring producer index */
	uint16_t		ag_raw_prod;	/* agg ring producer index */
	uint16_t                rx_cons; /* Needed for representor */
	uint16_t                rx_next_cons;
	struct bnxt_db_info     rx_db;	/* Rx ring doorbell */
	struct bnxt_db_info     ag_db;	/* agg ring doorbell */

	struct rx_prod_pkt_bd	*rx_desc_ring;	/* Rx hardware descriptors */
	struct rx_prod_pkt_bd	*ag_desc_ring;	/* agg hardware descriptors */
	struct rte_mbuf		**rx_buf_ring; /* sw ring */
	struct rte_mbuf		**ag_buf_ring; /* sw ring */

	rte_iova_t		rx_desc_mapping;	/* IOVA of rx_desc_ring */
	rte_iova_t		ag_desc_mapping;	/* IOVA of ag_desc_ring */

	struct bnxt_ring	*rx_ring_struct;
	struct bnxt_ring	*ag_ring_struct;

	/*
	 * To deal with out of order return from TPA, use free buffer indicator
	 */
	struct rte_bitmap	*ag_bitmap;

	/* One bnxt_tpa_info entry per in-flight TPA aggregation. */
	struct bnxt_tpa_info *tpa_info;

	/* Lookup tables translating completion bits into mbuf ol_flags. */
	uint32_t ol_flags_table[BNXT_OL_FLAGS_TBL_DIM];
	uint32_t ol_flags_err_table[BNXT_OL_FLAGS_ERR_TBL_DIM];
};

/* Scalar Rx burst function. */
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts);
/* Release all Rx ring resources for the port. */
void bnxt_free_rx_rings(struct bnxt *bp);
/* Allocate/initialize ring bookkeeping for one Rx queue on @socket_id. */
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
/* Per-queue start/stop handlers. */
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr);

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
/* Vector (SSE/Neon) Rx burst function and its per-queue setup. */
uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq);
#endif

#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
/* AVX2 Rx burst function. */
uint16_t bnxt_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts);
#endif
/* Set the per-packet mark in @mbuf from the Rx completion record. */
void bnxt_set_mark_in_mbuf(struct bnxt *bp,
			   struct rx_pkt_cmpl_hi *rxcmp1,
			   struct rte_mbuf *mbuf);

/* Type and registered offset of the CFA code mbuf dynamic field. */
typedef uint32_t bnxt_cfa_code_dynfield_t;
extern int bnxt_cfa_code_dynfield_offset;

126 static inline bnxt_cfa_code_dynfield_t *
bnxt_cfa_code_dynfield(struct rte_mbuf * mbuf)127 bnxt_cfa_code_dynfield(struct rte_mbuf *mbuf)
128 {
129 	return RTE_MBUF_DYNFIELD(mbuf,
130 		bnxt_cfa_code_dynfield_offset, bnxt_cfa_code_dynfield_t *);
131 }
132 
/*
 * Constants for decoding the CFA (flow) metadata / cfa_code fields
 * carried in Rx completion records.
 */
#define BNXT_RX_META_CFA_CODE_SHIFT		19
#define BNXT_CFA_CODE_META_SHIFT		16
#define BNXT_RX_META_CFA_CODE_INT_ACT_REC_BIT	0x8000000
#define BNXT_RX_META_CFA_CODE_EEM_BIT		0x4000000
#define BNXT_CFA_META_FMT_MASK			0x70
#define BNXT_CFA_META_FMT_SHFT			4
#define BNXT_CFA_META_FMT_EM_EEM_SHFT		1
#define BNXT_CFA_META_FMT_EEM			3
#define BNXT_CFA_META_EEM_TCAM_SHIFT		31
/* Extract the top (bit 31) EEM/TCAM flag from @x. */
#define BNXT_CFA_META_EM_TEST(x) ((x) >> BNXT_CFA_META_EEM_TCAM_SHIFT)

/* Definitions for translation of hardware packet type to mbuf ptype. */
#define BNXT_PTYPE_TBL_DIM		128
#define BNXT_PTYPE_TBL_TUN_SFT		0 /* Set if tunneled packet. */
#define BNXT_PTYPE_TBL_TUN_MSK		BIT(BNXT_PTYPE_TBL_TUN_SFT)
#define BNXT_PTYPE_TBL_IP_VER_SFT	1 /* Set if IPv6, clear if IPv4. */
#define BNXT_PTYPE_TBL_IP_VER_MSK	BIT(BNXT_PTYPE_TBL_IP_VER_SFT)
#define BNXT_PTYPE_TBL_VLAN_SFT		2 /* Set if VLAN encapsulated. */
#define BNXT_PTYPE_TBL_VLAN_MSK		BIT(BNXT_PTYPE_TBL_VLAN_SFT)
#define BNXT_PTYPE_TBL_TYPE_SFT		3 /* Hardware packet type field. */
#define BNXT_PTYPE_TBL_TYPE_MSK		0x78 /* Hardware itype field mask. */
#define BNXT_PTYPE_TBL_TYPE_IP		1
#define BNXT_PTYPE_TBL_TYPE_TCP		2
#define BNXT_PTYPE_TBL_TYPE_UDP		3
#define BNXT_PTYPE_TBL_TYPE_ICMP	7

#define RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT	8
/* Completion flags2 bits that feed the VLAN/tunnel table index bits. */
#define CMPL_FLAGS2_VLAN_TUN_MSK \
	(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN | RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)

/* Shift the completion itype field down onto the table index bits. */
#define BNXT_CMPL_ITYPE_TO_IDX(ft) \
	(((ft) & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >> \
	  (RX_PKT_CMPL_FLAGS_ITYPE_SFT - BNXT_PTYPE_TBL_TYPE_SFT))

/* Shift the VLAN/tunnel completion bits onto the table index bits. */
#define BNXT_CMPL_VLAN_TUN_TO_IDX(f2) \
	(((f2) & CMPL_FLAGS2_VLAN_TUN_MSK) >> \
	 (RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT - BNXT_PTYPE_TBL_VLAN_SFT))

/* Shift the IP version completion bit onto the table index bit. */
#define BNXT_CMPL_IP_VER_TO_IDX(f2) \
	(((f2) & RX_PKT_CMPL_FLAGS2_IP_TYPE) >> \
	 (RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT - BNXT_PTYPE_TBL_IP_VER_SFT))

/*
 * Compile-time checks that the BNXT_CMPL_*_TO_IDX() shifts place each
 * completion field exactly onto its bnxt_ptype_table index bits.
 */
static inline void
bnxt_check_ptype_constants(void)
{
	RTE_BUILD_BUG_ON(BNXT_CMPL_ITYPE_TO_IDX(RX_PKT_CMPL_FLAGS_ITYPE_MASK) !=
			 BNXT_PTYPE_TBL_TYPE_MSK);
	RTE_BUILD_BUG_ON(BNXT_CMPL_VLAN_TUN_TO_IDX(CMPL_FLAGS2_VLAN_TUN_MSK) !=
			 (BNXT_PTYPE_TBL_VLAN_MSK | BNXT_PTYPE_TBL_TUN_MSK));
	RTE_BUILD_BUG_ON(BNXT_CMPL_IP_VER_TO_IDX(RX_PKT_CMPL_FLAGS2_IP_TYPE) !=
			 BNXT_PTYPE_TBL_IP_VER_MSK);
}

/* Table translating the index bits above into RTE_PTYPE_* values. */
extern uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM];

bnxt_set_vlan(struct rx_pkt_cmpl_hi * rxcmp1,struct rte_mbuf * mbuf)188 static inline void bnxt_set_vlan(struct rx_pkt_cmpl_hi *rxcmp1,
189 				 struct rte_mbuf *mbuf)
190 {
191 	uint32_t metadata = rte_le_to_cpu_32(rxcmp1->metadata);
192 
193 	mbuf->vlan_tci = metadata & (RX_PKT_CMPL_METADATA_VID_MASK |
194 				     RX_PKT_CMPL_METADATA_DE |
195 				     RX_PKT_CMPL_METADATA_PRI_MASK);
196 }
197 
/* Stingray2 specific code for RX completion parsing */
/* Nonzero when the v2 completion's metadata1 reports a valid VLAN. */
#define RX_CMP_VLAN_VALID(rxcmp)        \
	(((struct rx_pkt_v2_cmpl *)rxcmp)->metadata1_payload_offset &	\
	 RX_PKT_V2_CMPL_METADATA1_VALID)

/* VLAN TCI (VID, DE and PRI bits) from the v2 completion's metadata0. */
#define RX_CMP_METADATA0_VID(rxcmp1)				\
	((((struct rx_pkt_v2_cmpl_hi *)rxcmp1)->metadata0) &	\
	 (RX_PKT_V2_CMPL_HI_METADATA0_VID_MASK |		\
	  RX_PKT_V2_CMPL_HI_METADATA0_DE  |			\
	  RX_PKT_V2_CMPL_HI_METADATA0_PRI_MASK))

bnxt_rx_vlan_v2(struct rte_mbuf * mbuf,struct rx_pkt_cmpl * rxcmp,struct rx_pkt_cmpl_hi * rxcmp1)209 static inline void bnxt_rx_vlan_v2(struct rte_mbuf *mbuf,
210 				   struct rx_pkt_cmpl *rxcmp,
211 				   struct rx_pkt_cmpl_hi *rxcmp1)
212 {
213 	if (RX_CMP_VLAN_VALID(rxcmp)) {
214 		mbuf->vlan_tci = RX_CMP_METADATA0_VID(rxcmp1);
215 		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
216 	}
217 }
218 
/* flags2 bit positions of the v2 completion checksum fields. */
#define RX_CMP_FLAGS2_CS_ALL_OK_MODE_MASK	(0x1 << 3)
#define RX_CMP_FLAGS2_CS_OK_HDR_CNT_MASK	(0x7 << 10)
#define RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK	(0x1 << 13)
#define RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK	(0x1 << 14)

/* Header count in "all ok" mode; >1 indicates a tunneled packet
 * (see bnxt_parse_csum_v2()).
 */
#define RX_CMP_V2_CS_OK_HDR_CNT(flags)				\
	(((flags) & RX_CMP_FLAGS2_CS_OK_HDR_CNT_MASK) >>	\
	 RX_PKT_V2_CMPL_HI_FLAGS2_CS_OK_SFT)

/* Nonzero when the completion reports checksums in "all ok" mode. */
#define RX_CMP_V2_CS_ALL_OK_MODE(flags)				\
	(((flags) & RX_CMP_FLAGS2_CS_ALL_OK_MODE_MASK))

#define RX_CMP_FLAGS2_L3_CS_OK_MASK		(0x7 << 10)
#define RX_CMP_FLAGS2_L4_CS_OK_MASK		(0x38 << 10)
#define RX_CMP_FLAGS2_L3_CS_OK_SFT		10
#define RX_CMP_FLAGS2_L4_CS_OK_SFT		13

/* Per-level L4/L3 checksum-OK counts (non-"all ok" mode). */
#define RX_CMP_V2_L4_CS_OK(flags2)			\
	(((flags2) & RX_CMP_FLAGS2_L4_CS_OK_MASK) >>	\
	 RX_CMP_FLAGS2_L4_CS_OK_SFT)

#define RX_CMP_V2_L3_CS_OK(flags2)			\
	(((flags2) & RX_CMP_FLAGS2_L3_CS_OK_MASK) >>	\
	 RX_CMP_FLAGS2_L3_CS_OK_SFT)

/* Tests of the v2 completion error field for specific checksum errors. */
#define RX_CMP_V2_L4_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_MASK)  ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_L4_CS_ERROR)

#define RX_CMP_V2_L3_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_IP_CS_ERROR)

/* Tunnel (T) and outer-tunnel (OT) checksum error tests. */
#define RX_CMP_V2_T_IP_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_T_IP_CS_ERROR)

#define RX_CMP_V2_T_L4_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_T_L4_CS_ERROR)

#define RX_CMP_V2_OT_L4_CS_ERR(err)					\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_OT_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_OT_PKT_ERROR_OT_L4_CS_ERROR)

bnxt_parse_csum_v2(struct rte_mbuf * mbuf,struct rx_pkt_cmpl_hi * rxcmp1)264 static inline void bnxt_parse_csum_v2(struct rte_mbuf *mbuf,
265 				      struct rx_pkt_cmpl_hi *rxcmp1)
266 {
267 	struct rx_pkt_v2_cmpl_hi *v2_cmp =
268 		(struct rx_pkt_v2_cmpl_hi *)(rxcmp1);
269 	uint16_t error_v2 = rte_le_to_cpu_16(v2_cmp->errors_v2);
270 	uint32_t flags2 = rte_le_to_cpu_32(v2_cmp->flags2);
271 	uint32_t hdr_cnt = 0, t_pkt = 0;
272 
273 	if (RX_CMP_V2_CS_ALL_OK_MODE(flags2)) {
274 		hdr_cnt = RX_CMP_V2_CS_OK_HDR_CNT(flags2);
275 		if (hdr_cnt > 1)
276 			t_pkt = 1;
277 
278 		if (unlikely(RX_CMP_V2_L4_CS_ERR(error_v2)))
279 			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
280 		else if (flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
281 			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
282 		else
283 			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
284 
285 		if (unlikely(RX_CMP_V2_L3_CS_ERR(error_v2)))
286 			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
287 		else if (flags2 & RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK)
288 			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
289 		else
290 			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
291 	} else {
292 		hdr_cnt = RX_CMP_V2_L4_CS_OK(flags2);
293 		if (hdr_cnt > 1)
294 			t_pkt = 1;
295 
296 		if (RX_CMP_V2_L4_CS_OK(flags2))
297 			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
298 		else if (RX_CMP_V2_L4_CS_ERR(error_v2))
299 			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
300 		else
301 			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
302 
303 		if (RX_CMP_V2_L3_CS_OK(flags2))
304 			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
305 		else if (RX_CMP_V2_L3_CS_ERR(error_v2))
306 			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
307 		else
308 			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
309 	}
310 
311 	if (t_pkt) {
312 		if (unlikely(RX_CMP_V2_OT_L4_CS_ERR(error_v2) ||
313 					RX_CMP_V2_T_L4_CS_ERR(error_v2)))
314 			mbuf->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
315 		else
316 			mbuf->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
317 
318 		if (unlikely(RX_CMP_V2_T_IP_CS_ERR(error_v2)))
319 			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
320 	}
321 }
322 
/*
 * Derive mbuf->packet_type from a v2 (Stingray2-style) Rx completion:
 * L2 (VLAN or plain Ethernet), L3 (IPv4/IPv6, inner variant when
 * tunneled) and L4 (ICMP/TCP/UDP) RTE_PTYPE_* bits.
 */
static inline void
bnxt_parse_pkt_type_v2(struct rte_mbuf *mbuf,
		       struct rx_pkt_cmpl *rxcmp,
		       struct rx_pkt_cmpl_hi *rxcmp1)
{
	struct rx_pkt_v2_cmpl *v2_cmp =
		(struct rx_pkt_v2_cmpl *)(rxcmp);
	struct rx_pkt_v2_cmpl_hi *v2_cmp1 =
		(struct rx_pkt_v2_cmpl_hi *)(rxcmp1);
	/*
	 * NOTE(review): flags_type is a 16-bit field but is masked here
	 * with a 32-bit LE conversion and compared against RTE_LE32
	 * constants in the switch below; this appears to assume a
	 * little-endian host -- confirm behavior on big-endian targets.
	 */
	uint16_t flags_type = v2_cmp->flags_type &
		rte_cpu_to_le_32(RX_PKT_V2_CMPL_FLAGS_ITYPE_MASK);
	uint32_t flags2 = rte_le_to_cpu_32(v2_cmp1->flags2);
	uint32_t l3, pkt_type = 0, vlan = 0;
	uint32_t ip6 = 0, t_pkt = 0;
	uint32_t hdr_cnt, csum_count;

	/* Tunnel detection: more than one header level reported, in
	 * whichever checksum-reporting mode the completion uses.
	 */
	if (RX_CMP_V2_CS_ALL_OK_MODE(flags2)) {
		hdr_cnt = RX_CMP_V2_CS_OK_HDR_CNT(flags2);
		if (hdr_cnt > 1)
			t_pkt = 1;
	} else {
		csum_count = RX_CMP_V2_L4_CS_OK(flags2);
		if (csum_count > 1)
			t_pkt = 1;
	}

	/* L2: plain Ethernet or VLAN-tagged. */
	vlan = !!RX_CMP_VLAN_VALID(rxcmp);
	pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;

	/* L3: IPv4 vs IPv6; inner variant for tunneled packets. */
	ip6 = !!(flags2 & RX_PKT_V2_CMPL_HI_FLAGS2_IP_TYPE);

	if (!t_pkt && !ip6)
		l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (!t_pkt && ip6)
		l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	else if (t_pkt && !ip6)
		l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
	else
		l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;

	/* L4: from the hardware itype field; inner variant when tunneled. */
	switch (flags_type) {
	case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_ICMP):
		if (!t_pkt)
			pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
		break;
	case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_TCP):
		if (!t_pkt)
			pkt_type |= l3 | RTE_PTYPE_L4_TCP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
		break;
	case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_UDP):
		if (!t_pkt)
			pkt_type |= l3 | RTE_PTYPE_L4_UDP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
		break;
	case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_IP):
		pkt_type |= l3;
		break;
	}

	mbuf->packet_type = pkt_type;
}

#endif /*  _BNXT_RXR_H_ */