/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdint.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
#include "ixgbe_rxtx_vec_common.h"

#include <tmmintrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

static inline void
ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	__m128i dma_addr0, dma_addr1;

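	/* mask that keeps the packet buffer address (low qword) and
	 * clears the header buffer address (high qword) of a descriptor
	 */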
	const __m128i hba_msk = _mm_set_epi64x(0, UINT64_MAX);

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mb_pool,
				 (void *)rxep,
				 RTE_IXGBE_RXQ_REARM_THRESH) < 0) {
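		/* Allocation failed: if the ring is about to run dry, park
		 * the next descriptors on the fake mbuf and clear their
		 * addresses so the NIC does not DMA into stale buffers.
		 */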
		if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_IXGBE_RXQ_REARM_THRESH;
		return;
	}

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
		vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* set Header Buffer Address to zero */
		dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
		dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}

	rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

	rx_id = (uint16_t) ((rxq->rxrearm_start == 0) ?
			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IXGBE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, rx_id);
}

#ifdef RTE_LIB_SECURITY
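/*
 * Translate the IPsec status bits of four descriptors into
 * PKT_RX_SEC_OFFLOAD / PKT_RX_SEC_OFFLOAD_FAILED and merge them into
 * the ol_flags of the corresponding mbufs.
 */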
static inline void
desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
	__m128i sterr, rearm, tmp_e, tmp_p;
	uint32_t *rearm0 = (uint32_t *)rx_pkts[0]->rearm_data + 2;
	uint32_t *rearm1 = (uint32_t *)rx_pkts[1]->rearm_data + 2;
	uint32_t *rearm2 = (uint32_t *)rx_pkts[2]->rearm_data + 2;
	uint32_t *rearm3 = (uint32_t *)rx_pkts[3]->rearm_data + 2;
	const __m128i ipsec_sterr_msk =
			_mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP |
				       IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED);
	const __m128i ipsec_proc_msk  =
			_mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP);
	const __m128i ipsec_err_flag  =
			_mm_set1_epi32(PKT_RX_SEC_OFFLOAD_FAILED |
				       PKT_RX_SEC_OFFLOAD);
	const __m128i ipsec_proc_flag = _mm_set1_epi32(PKT_RX_SEC_OFFLOAD);

	rearm = _mm_set_epi32(*rearm3, *rearm2, *rearm1, *rearm0);
	sterr = _mm_set_epi32(_mm_extract_epi32(descs[3], 2),
			      _mm_extract_epi32(descs[2], 2),
			      _mm_extract_epi32(descs[1], 2),
			      _mm_extract_epi32(descs[0], 2));
	sterr = _mm_and_si128(sterr, ipsec_sterr_msk);
	tmp_e = _mm_cmpeq_epi32(sterr, ipsec_sterr_msk);
	tmp_p = _mm_cmpeq_epi32(sterr, ipsec_proc_msk);
	sterr = _mm_or_si128(_mm_and_si128(tmp_e, ipsec_err_flag),
				_mm_and_si128(tmp_p, ipsec_proc_flag));
	rearm = _mm_or_si128(rearm, sterr);
	*rearm0 = _mm_extract_epi32(rearm, 0);
	*rearm1 = _mm_extract_epi32(rearm, 1);
	*rearm2 = _mm_extract_epi32(rearm, 2);
	*rearm3 = _mm_extract_epi32(rearm, 3);
}
#endif

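/*
 * Derive the RSS, VLAN and IP/L4 checksum ol_flags of four descriptors
 * and write them to the mbufs together with the constant rearm data in
 * a single 16-byte store per mbuf.
 */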
static inline void
desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
	struct rte_mbuf **rx_pkts)
{
	__m128i ptype0, ptype1, vtag0, vtag1, csum;
	__m128i rearm0, rearm1, rearm2, rearm3;

	/* mask everything except rss type */
	const __m128i rsstype_msk = _mm_set_epi16(
			0x0000, 0x0000, 0x0000, 0x0000,
			0x000F, 0x000F, 0x000F, 0x000F);

	/* mask the lower byte of ol_flags */
	const __m128i ol_flags_msk = _mm_set_epi16(
			0x0000, 0x0000, 0x0000, 0x0000,
			0x00FF, 0x00FF, 0x00FF, 0x00FF);

	/* map rss type to rss hash flag */
	const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0,
			0, 0, 0, PKT_RX_RSS_HASH,
			PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
			PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);

	/* mask everything except vlan present and l4/ip csum error */
	const __m128i vlan_csum_msk = _mm_set_epi16(
		(IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
		(IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
		(IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
		(IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
		IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
		IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP);
	/* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */
	const __m128i vlan_csum_map_lo = _mm_set_epi8(
		0, 0, 0, 0,
		vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
		vlan_flags | PKT_RX_IP_CKSUM_BAD,
		vlan_flags | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
		vlan_flags | PKT_RX_IP_CKSUM_GOOD,
		0, 0, 0, 0,
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
		PKT_RX_IP_CKSUM_BAD,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
		PKT_RX_IP_CKSUM_GOOD);

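	/* second map for flag bits above the low ol_flags byte:
	 * PKT_RX_L4_CKSUM_GOOD is stored shifted right by one bit here and
	 * shifted back after the byte shuffle below
	 */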
	const __m128i vlan_csum_map_hi = _mm_set_epi8(
		0, 0, 0, 0,
		0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
		PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t),
		0, 0, 0, 0,
		0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
		PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));

	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
	vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);

	ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
	ptype0 = _mm_and_si128(ptype0, rsstype_msk);
	ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);

	vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
	vtag1 = _mm_and_si128(vtag1, vlan_csum_msk);

	/* csum bits are in the most significant bits; to use shuffle we
	 * need to shift them. This changes the mask from 0xc000 to 0x0003.
	 */
	csum = _mm_srli_epi16(vtag1, 14);

	/* now OR the most significant 64 bits containing the checksum
	 * flags with the vlan present flags.
	 */
	csum = _mm_srli_si128(csum, 8);
	vtag1 = _mm_or_si128(csum, vtag1);

	/* convert VP, IPE, L4E to ol_flags */
	vtag0 = _mm_shuffle_epi8(vlan_csum_map_hi, vtag1);
	vtag0 = _mm_slli_epi16(vtag0, sizeof(uint8_t));

	vtag1 = _mm_shuffle_epi8(vlan_csum_map_lo, vtag1);
	vtag1 = _mm_and_si128(vtag1, ol_flags_msk);
	vtag1 = _mm_or_si128(vtag0, vtag1);

	vtag1 = _mm_or_si128(ptype0, vtag1);

	/*
	 * At this point, we have the 4 sets of flags in the low 64-bits
	 * of vtag1 (4x16).
	 * We want to extract these, and merge them with the mbuf init data
	 * so we can do a single 16-byte write to the mbuf to set the flags
	 * and all the other initialization fields. Extracting the
	 * appropriate flags means that we have to do a shift and blend for
	 * each mbuf before we do the write.
	 */
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10);
	rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);

	/* write the rearm data and the olflags in one write */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			offsetof(struct rte_mbuf, rearm_data) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
			RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}

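/*
 * Look up the packet type of one descriptor. etqf_check and tunnel_check
 * are _mm_movemask_epi8() results in which each descriptor owns four bits,
 * hence the index * RTE_IXGBE_DESCS_PER_LOOP shift.
 */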
static inline uint32_t get_packet_type(int index,
				       uint32_t pkt_info,
				       uint32_t etqf_check,
				       uint32_t tunnel_check)
{
	if (etqf_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP)))
		return RTE_PTYPE_UNKNOWN;

	if (tunnel_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP))) {
		pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
		return ptype_table_tn[pkt_info];
	}

	pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
	return ptype_table[pkt_info];
}

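/*
 * Extract the hardware packet type field of four descriptors and translate
 * it through the ptype lookup tables into rte_mbuf packet_type values.
 */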
static inline void
desc_to_ptype_v(__m128i descs[4], uint16_t pkt_type_mask,
		struct rte_mbuf **rx_pkts)
{
	__m128i etqf_mask = _mm_set_epi64x(0x800000008000LL, 0x800000008000LL);
	__m128i ptype_mask = _mm_set_epi32(
		pkt_type_mask, pkt_type_mask, pkt_type_mask, pkt_type_mask);
	__m128i tunnel_mask =
		_mm_set_epi64x(0x100000001000LL, 0x100000001000LL);

	uint32_t etqf_check, tunnel_check, pkt_info;

	__m128i ptype0 = _mm_unpacklo_epi32(descs[0], descs[2]);
	__m128i ptype1 = _mm_unpacklo_epi32(descs[1], descs[3]);

	/* interleave low 32 bits,
	 * now we have 4 ptypes in a XMM register
	 */
	ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);

	/* create an etqf bitmask based on the etqf bit. */
	etqf_check = _mm_movemask_epi8(_mm_and_si128(ptype0, etqf_mask));

	/* shift left by IXGBE_PACKET_TYPE_SHIFT, and apply ptype mask */
	ptype0 = _mm_and_si128(_mm_srli_epi32(ptype0, IXGBE_PACKET_TYPE_SHIFT),
			       ptype_mask);

	/* create a tunnel bitmask based on the tunnel bit */
	tunnel_check = _mm_movemask_epi8(
		_mm_slli_epi32(_mm_and_si128(ptype0, tunnel_mask), 0x3));

	pkt_info = _mm_extract_epi32(ptype0, 0);
	rx_pkts[0]->packet_type =
		get_packet_type(0, pkt_info, etqf_check, tunnel_check);
	pkt_info = _mm_extract_epi32(ptype0, 1);
	rx_pkts[1]->packet_type =
		get_packet_type(1, pkt_info, etqf_check, tunnel_check);
	pkt_info = _mm_extract_epi32(ptype0, 2);
	rx_pkts[2]->packet_type =
		get_packet_type(2, pkt_info, etqf_check, tunnel_check);
	pkt_info = _mm_extract_epi32(ptype0, 3);
	rx_pkts[3]->packet_type =
		get_packet_type(3, pkt_info, etqf_check, tunnel_check);
}

/**
 * vPMD raw receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP
 *
 * Notice:
 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, no packets are returned
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 */
static inline uint16_t
_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
#ifdef RTE_LIB_SECURITY
	uint8_t use_ipsec = rxq->using_ipsec;
#endif
	int pos;
	uint64_t var;
	__m128i shuf_msk;
	__m128i crc_adjust = _mm_set_epi16(
				0, 0, 0,    /* ignore non-length fields */
				-rxq->crc_len, /* sub crc on data_len */
				0,          /* ignore high-16bits of pkt_len */
				-rxq->crc_len, /* sub crc on pkt_len */
				0, 0            /* ignore pkt_type field */
			);
	/*
	 * compile-time check the above crc_adjust layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi16
	 * call above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	__m128i dd_check, eop_check;
	__m128i mbuf_init;
	uint8_t vlan_flags;

	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch0(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
		ixgbe_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.upper.status_error &
				rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		return 0;

	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* 4 packets EOP mask */
	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
		15, 14,      /* octet 14~15, low 16 bits vlan_macip */
		13, 12,      /* octet 12~13, 16 bits data_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		13, 12,      /* octet 12~13, low 16 bits pkt_len */
		0xFF, 0xFF,  /* skip 32 bit pkt_type */
		0xFF, 0xFF
		);
	/*
	 * Compile-time verify the shuffle mask
	 * NOTE: some field positions already verified above, but duplicated
	 * here for completeness in case of future modifications.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

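	/* mbuf_initializer holds the constant part of rearm_data (data_off,
	 * refcnt, nb_segs, port); it is blended with the per-packet flags
	 * before the single 16-byte rearm_data store.
	 */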
	mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* ensure these 2 flags are in the lower 8 bits */
	RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
	vlan_flags = rxq->vlan_flags & UINT8_MAX;

	/* A. load 4 packets in one loop
	 * [A*. mask out 4 unused dirty fields in desc]
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_IXGBE_DESCS_PER_LOOP,
			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
		__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
		__m128i mbp1;
#if defined(RTE_ARCH_X86_64)
		__m128i mbp2;
#endif

		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);

		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 pkts desc */
		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
		rte_compiler_barrier();

		/* B.2 copy 2 64 bit or 4 32 bit mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

#if defined(RTE_ARCH_X86_64)
		/* B.1 load 2 64 bit mbuf pointers */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
#endif

		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		rte_compiler_barrier();
		/* A.1 load the remaining pkts desc */
		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		rte_compiler_barrier();
		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

#if defined(RTE_ARCH_X86_64)
		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
#endif

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		/* set ol_flags with vlan packet type */
		desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);

#ifdef RTE_LIB_SECURITY
		if (unlikely(use_ipsec))
			desc_to_olflags_v_ipsec(descs, &rx_pkts[pos]);
#endif

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);

		/* C.2 get 4 pkts staterr value */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
				pkt_mb4);
		_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
				pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);

		/* C* extract and record EOP bit */
		if (split_packet) {
			__m128i eop_shuf_mask = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0x04, 0x0C, 0x00, 0x08
					);

			/* and with mask to extract bits, flipping 1-0 */
			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
			/* the staterr values are not in order; the count of
			 * DD bits does not care, but for end-of-packet
			 * tracking we do care, so shuffle. This also
			 * compresses the 32-bit values to 8-bit
			 */
			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
			/* store the resulting 32-bit value */
			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
			split_packet += RTE_IXGBE_DESCS_PER_LOOP;
		}

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);

		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
				pkt_mb2);
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
				pkt_mb1);

		desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);

		/* C.4 calc available number of desc */
		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
		nb_pkts_recd += var;
		if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/**
 * vPMD receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP
 *
 * Notice:
 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, no packets are returned
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 */
uint16_t
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/**
 * vPMD receive routine that reassembles scattered packets
 *
 * Notice:
 * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, no packets are returned
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 */
static uint16_t
ixgbe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts)
{
	struct ixgbe_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;
	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned i = 0;
	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 */
uint16_t
ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts)
{
	uint16_t retval = 0;

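	/* split requests larger than RTE_IXGBE_MAX_RX_BURST into
	 * RTE_IXGBE_MAX_RX_BURST sized bursts
	 */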
	while (nb_pkts > RTE_IXGBE_MAX_RX_BURST) {
		uint16_t burst;

		burst = ixgbe_recv_scattered_burst_vec(rx_queue,
						       rx_pkts + retval,
						       RTE_IXGBE_MAX_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < RTE_IXGBE_MAX_RX_BURST)
			return retval;
	}

	return retval + ixgbe_recv_scattered_burst_vec(rx_queue,
						       rx_pkts + retval,
						       nb_pkts);
}

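/*
 * Build one advanced TX descriptor: the buffer DMA address goes in the low
 * qword; the command/type flags, data length and PAYLEN (pkt_len << 46) go
 * in the high qword, written with a single 16-byte store.
 */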
static inline void
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
	__m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
			flags | pkt->data_len,
			pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)&txdp->read, descriptor);
}

static inline void
vtx(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}

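/*
 * Vector TX burst: free completed descriptors when the free threshold is
 * reached, limit the burst to tx_rs_thresh, write the descriptors with
 * vtx()/vtx1(), request report status (RS) at tx_next_rs intervals and
 * finally update the tail register.
 */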
uint16_t
ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			   uint16_t nb_pkts)
{
	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *txdp;
	struct ixgbe_tx_entry_v *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = DCMD_DTYP_FLAGS;
	uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
	int i;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring_v[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
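		/* the burst wraps past the end of the ring: fill the ring up
		 * to the wrap point, set RS on the last descriptor, then
		 * continue from index 0
		 */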

		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &(txq->tx_ring[tx_id]);
		txep = &txq->sw_ring_v[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
			txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	IXGBE_PCI_REG_WC_WRITE(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}

static void __rte_cold
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_queue_release_mbufs_vec(txq);
}

void __rte_cold
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
	_ixgbe_rx_queue_release_mbufs_vec(rxq);
}

static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_free_swring_vec(txq);
}

static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
	_ixgbe_reset_tx_queue_vec(txq);
}

static const struct ixgbe_txq_ops vec_txq_ops = {
	.release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
	.free_swring = ixgbe_tx_free_swring,
	.reset = ixgbe_reset_tx_queue,
};

int __rte_cold
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
	return ixgbe_rxq_vec_setup_default(rxq);
}

int __rte_cold
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
	return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}

int __rte_cold
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}