/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdint.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"

#include <tmmintrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
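
/* SSE vector PMD RX/TX path for the i40e driver: the receive routines below
 * process four descriptors per loop iteration, and the transmit routine
 * writes one 16-byte data descriptor per packet.
 */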

static inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	__m128i dma_addr0, dma_addr1;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
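			/* Allocation failed and nearly the whole ring awaits
			 * rearm: point the entries at the queue's fake_mbuf
			 * and zero the descriptor addresses so the RX path
			 * never dereferences a stale buffer pointer.
			 */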
			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_I40E_RXQ_REARM_THRESH;
		return;
	}

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
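		/* unpackhi with itself broadcasts buf_iova into both 64-bit
		 * lanes, so the descriptor's header and packet buffer
		 * address fields receive the same base address
		 */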

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}

	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}

#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
/* SSE version of FDIR mark extraction for 4 32B descriptors at a time */
static inline __m128i
descs_to_fdir_32b(volatile union i40e_rx_desc *rxdp, struct rte_mbuf **rx_pkt)
{
	/* 32B descriptors: Load 2nd half of descriptors for FDIR ID data */
	__m128i desc0_qw23, desc1_qw23, desc2_qw23, desc3_qw23;
	desc0_qw23 = _mm_loadu_si128((__m128i *)&(rxdp + 0)->wb.qword2);
	desc1_qw23 = _mm_loadu_si128((__m128i *)&(rxdp + 1)->wb.qword2);
	desc2_qw23 = _mm_loadu_si128((__m128i *)&(rxdp + 2)->wb.qword2);
	desc3_qw23 = _mm_loadu_si128((__m128i *)&(rxdp + 3)->wb.qword2);

	/* FDIR ID data: move last u32 of each desc to 4 u32 lanes */
	__m128i v_unpack_01, v_unpack_23;
	v_unpack_01 = _mm_unpackhi_epi32(desc0_qw23, desc1_qw23);
	v_unpack_23 = _mm_unpackhi_epi32(desc2_qw23, desc3_qw23);
	__m128i v_fdir_ids = _mm_unpackhi_epi64(v_unpack_01, v_unpack_23);

	/* Extended Status: extract from each lower 32 bits, to u32 lanes */
	v_unpack_01 = _mm_unpacklo_epi32(desc0_qw23, desc1_qw23);
	v_unpack_23 = _mm_unpacklo_epi32(desc2_qw23, desc3_qw23);
	__m128i v_flt_status = _mm_unpacklo_epi64(v_unpack_01, v_unpack_23);

	/* Shift u32 left and right to "mask away" bits not required.
	 * Data required is 4:5 (zero based), so left shift by 26 (32-6)
	 * and then right shift by 30 (32 - 2 bits required).
	 */
	v_flt_status = _mm_slli_epi32(v_flt_status, 26);
	v_flt_status = _mm_srli_epi32(v_flt_status, 30);

	/* Generate constant 1 in all u32 lanes and compare */
	RTE_BUILD_BUG_ON(I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID != 1);
	__m128i v_zeros = _mm_setzero_si128();
	__m128i v_ffff = _mm_cmpeq_epi32(v_zeros, v_zeros);
	__m128i v_u32_one = _mm_srli_epi32(v_ffff, 31);

	/* per desc mask, bits set if FDIR ID is valid */
	__m128i v_fd_id_mask = _mm_cmpeq_epi32(v_flt_status, v_u32_one);

	/* Mask ID data to zero if the FD_ID bit not set in desc */
	v_fdir_ids = _mm_and_si128(v_fdir_ids, v_fd_id_mask);
	/* Extract and store as u32. There is no advantage to combining into
	 * SSE stores, as there are no adjacent stores around fdir.hi
	 */
	rx_pkt[0]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 0);
	rx_pkt[1]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 1);
	rx_pkt[2]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 2);
	rx_pkt[3]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 3);

	/* convert fdir_id_mask into a single bit, then shift as required for
	 * the correct location in mbuf->ol_flags
	 */
	const uint32_t FDIR_ID_BIT_SHIFT = 13;
	RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT));
	v_fd_id_mask = _mm_srli_epi32(v_fd_id_mask, 31);
	v_fd_id_mask = _mm_slli_epi32(v_fd_id_mask, FDIR_ID_BIT_SHIFT);

	/* The returned value must be combined into each mbuf. This is already
	 * being done for the RSS and VLAN ol_flags, so return the bits to OR in.
	 */
	return v_fd_id_mask;
}

#else /* 32 or 16B FDIR ID handling */

/* Handle 16B descriptor FDIR ID flag setting based on FLM. See the scalar
 * driver for a scalar implementation of the same functionality.
 */
static inline __m128i
descs_to_fdir_16b(__m128i fltstat, __m128i descs[4], struct rte_mbuf **rx_pkt)
{
	/* unpack filter-status data from descriptors */
	__m128i v_tmp_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
	__m128i v_tmp_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
	__m128i v_fdir_ids = _mm_unpackhi_epi64(v_tmp_01, v_tmp_23);

	/* Generate 0b111 and 0b11 masks in each u32 lane */
	__m128i v_zeros = _mm_setzero_si128();
	__m128i v_ffff = _mm_cmpeq_epi32(v_zeros, v_zeros);
	__m128i v_111_mask = _mm_srli_epi32(v_ffff, 29);
	__m128i v_11_mask = _mm_srli_epi32(v_ffff, 30);

	/* Top lane ones mask for FDIR isolation */
	__m128i v_desc_fdir_mask = _mm_insert_epi32(v_zeros, UINT32_MAX, 1);

	/* Compare and mask away FDIR ID data if bit not set */
	__m128i v_u32_bits = _mm_and_si128(v_111_mask, fltstat);
	__m128i v_fdir_id_mask = _mm_cmpeq_epi32(v_u32_bits, v_11_mask);
	v_fdir_ids = _mm_and_si128(v_fdir_id_mask, v_fdir_ids);

	/* Store data to fdir.hi in mbuf */
	rx_pkt[0]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 0);
	rx_pkt[1]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 1);
	rx_pkt[2]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 2);
	rx_pkt[3]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 3);

	/* Move fdir_id_mask to correct lane, blend RSS to zero on hits */
	__m128i v_desc3_shift = _mm_alignr_epi8(v_zeros, v_fdir_id_mask, 8);
	__m128i v_desc3_mask = _mm_and_si128(v_desc_fdir_mask, v_desc3_shift);
	descs[3] = _mm_blendv_epi8(descs[3], _mm_setzero_si128(), v_desc3_mask);

	__m128i v_desc2_shift = _mm_alignr_epi8(v_zeros, v_fdir_id_mask, 4);
	__m128i v_desc2_mask = _mm_and_si128(v_desc_fdir_mask, v_desc2_shift);
	descs[2] = _mm_blendv_epi8(descs[2], _mm_setzero_si128(), v_desc2_mask);

	__m128i v_desc1_shift = v_fdir_id_mask;
	__m128i v_desc1_mask = _mm_and_si128(v_desc_fdir_mask, v_desc1_shift);
	descs[1] = _mm_blendv_epi8(descs[1], _mm_setzero_si128(), v_desc1_mask);

	__m128i v_desc0_shift = _mm_alignr_epi8(v_fdir_id_mask, v_zeros, 12);
	__m128i v_desc0_mask = _mm_and_si128(v_desc_fdir_mask, v_desc0_shift);
	descs[0] = _mm_blendv_epi8(descs[0], _mm_setzero_si128(), v_desc0_mask);

	/* Shift to 1 or 0 bit per u32 lane, then to PKT_RX_FDIR_ID offset */
	const uint32_t FDIR_ID_BIT_SHIFT = 13;
	RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT));
	__m128i v_mask_one_bit = _mm_srli_epi32(v_fdir_id_mask, 31);
	return _mm_slli_epi32(v_mask_one_bit, FDIR_ID_BIT_SHIFT);
}
#endif

static inline void
desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,
		  __m128i descs[4], struct rte_mbuf **rx_pkts)
{
	const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
	__m128i rearm0, rearm1, rearm2, rearm3;

	__m128i vlan0, vlan1, rss, l3_l4e;

	/* mask everything except RSS, flow director, VLAN and L3/L4
	 * checksum flags: bit 2 is for VLAN tag, bit 11 for flow director
	 * indication, bits 13:12 for RSS indication and bits 24:22 for the
	 * L3/L4 checksum error status.
	 */
	const __m128i rss_vlan_msk = _mm_set_epi32(
			0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);

	const __m128i cksum_mask = _mm_set_epi32(
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD,
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD);

	/* map rss and vlan type to rss hash and vlan flag */
	const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			0, 0, 0, 0);

	const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
			0, 0, PKT_RX_FDIR, 0);

	const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it does not exceed 255 */
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
			 PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);

	/* Unpack "status" from quadword 1, bits 0:32 */
	vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
	vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
	vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);

	vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
	vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);

	const __m128i desc_fltstat = _mm_srli_epi32(vlan1, 11);
	rss = _mm_shuffle_epi8(rss_flags, desc_fltstat);

	l3_l4e = _mm_srli_epi32(vlan1, 22);
	l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
	/* then we shift left 1 bit */
	l3_l4e = _mm_slli_epi32(l3_l4e, 1);
	/* we need to mask out the redundant bits */
	l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);

	vlan0 = _mm_or_si128(vlan0, rss);
	vlan0 = _mm_or_si128(vlan0, l3_l4e);

	/* Extract FDIR ID only if FDIR is enabled to avoid useless work */
	if (rxq->fdir_enabled) {
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
		__m128i v_fdir_ol_flags = descs_to_fdir_32b(rxdp, rx_pkts);
#else
		(void)rxdp; /* rxdp not required for 16B desc mode */
		__m128i v_fdir_ol_flags = descs_to_fdir_16b(desc_fltstat,
							    descs, rx_pkts);
#endif
		/* OR in ol_flag bits after descriptor-specific extraction */
		vlan0 = _mm_or_si128(vlan0, v_fdir_ol_flags);
	}

	/*
	 * At this point, we have the 4 sets of flags in the low 16-bits
	 * of each 32-bit value in vlan0.
	 * We want to extract these, and merge them with the mbuf init data
	 * so we can do a single 16-byte write to the mbuf to set the flags
	 * and all the other initialization fields. Extracting the
	 * appropriate flags means that we have to do a shift and blend for
	 * each mbuf before we do the write.
	 */
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
	rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
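	/* blend mask 0x10 writes only 16-bit word 4 (bytes 8-9): the low 16
	 * bits of ol_flags, which the build-time checks below place 8 bytes
	 * into rearm_data
	 */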

	/* write the rearm data and ol_flags in one write */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			offsetof(struct rte_mbuf, rearm_data) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
			RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}

#define PKTLEN_SHIFT     10

static inline void
desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
		uint32_t *ptype_tbl)
{
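	/* ptype lives in bits 37:30 of each descriptor's qword1: isolate
	 * qword1, shift right by 30, and the 8-bit ptype index lands in the
	 * low byte of each 64-bit lane
	 */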
	__m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
	__m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);

	ptype0 = _mm_srli_epi64(ptype0, 30);
	ptype1 = _mm_srli_epi64(ptype1, 30);

	rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 0)];
	rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 8)];
	rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 0)];
	rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 8)];
}

/**
 * vPMD raw receive routine; only accepts nb_pkts >= RTE_I40E_DESCS_PER_LOOP
 *
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts is floor-aligned to a multiple of RTE_I40E_DESCS_PER_LOOP
 */
static inline uint16_t
_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	__m128i shuf_msk;
	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;

	__m128i crc_adjust = _mm_set_epi16(
				0, 0, 0,    /* ignore non-length fields */
				-rxq->crc_len, /* sub crc on data_len */
				0,          /* ignore high-16bits of pkt_len */
				-rxq->crc_len, /* sub crc on pkt_len */
				0, 0            /* ignore pkt_type field */
			);
	/*
	 * compile-time check the above crc_adjust layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi16
	 * call above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	__m128i dd_check, eop_check;

	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch0(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* 4 packets EOP mask */
	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
		3, 2,        /* octet 2~3, low 16 bits vlan_macip */
		15, 14,      /* octet 15~14, 16 bits data_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		15, 14,      /* octet 15~14, low 16 bits pkt_len */
		0xFF, 0xFF,  /* pkt_type set as unknown */
		0xFF, 0xFF   /* pkt_type set as unknown */
		);
	/*
	 * Compile-time verify the shuffle mask
	 * NOTE: some field positions already verified above, but duplicated
	 * here for completeness in case of future modifications.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* A. load 4 packet descriptors in one loop
	 * [A*. mask out 4 unused dirty fields in desc]
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */

	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_I40E_DESCS_PER_LOOP,
			rxdp += RTE_I40E_DESCS_PER_LOOP) {
		__m128i descs[RTE_I40E_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
		__m128i mbp1;
#if defined(RTE_ARCH_X86_64)
		__m128i mbp2;
#endif

		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
		/* Read desc statuses backwards to avoid race condition:
		 * the NIC completes descriptors in order, so if descs[3]
		 * shows its DD bit set, the earlier descriptors loaded
		 * below are guaranteed to be complete as well.
		 */
		/* A.1 load 4 pkts desc */
		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
		rte_compiler_barrier();

		/* B.2 copy 2 64 bit or 4 32 bit mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

#if defined(RTE_ARCH_X86_64)
		/* B.1 load 2 64 bit mbuf pointers */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
#endif

		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		rte_compiler_barrier();
		/* A.1 load desc[1] */
		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		rte_compiler_barrier();
		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

#if defined(RTE_ARCH_X86_64)
		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
#endif

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();

		/* pkt 3,4 shift the pktlen field to be 16-bit aligned */
		const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
		const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);

		/* merge the now-aligned packet length fields back in */
		descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
		descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);
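
		/* pkt_len occupies bits 51:38 of qword1; the left shift by
		 * PKTLEN_SHIFT (10) moves it up into the top 16-bit word
		 * (bits 63:48), and blend mask 0x80 merges just that word
		 * back so the length sits on a 16-bit boundary for the
		 * shuffle below
		 */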

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		desc_to_olflags_v(rxq, rxdp, descs, &rx_pkts[pos]);

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);

		/* pkt 1,2 shift the pktlen field to be 16-bit aligned */
		const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
		const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);

		/* merge the now-aligned packet length fields back in */
		descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
		descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.2 get 4 pkts staterr value  */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
				 pkt_mb4);
		_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
				 pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);

		/* C* extract and record EOP bit */
		if (split_packet) {
			__m128i eop_shuf_mask = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0x04, 0x0C, 0x00, 0x08
					);

			/* and with mask to extract bits, flipping 1-0 */
			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
			/* the staterr values are not in order, as the count
			 * of dd bits doesn't care. However, for end of
			 * packet tracking, we do care, so shuffle. This also
			 * compresses the 32-bit values to 8-bit
			 */
			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
			/* store the resulting 32-bit value */
			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
			split_packet += RTE_I40E_DESCS_PER_LOOP;
		}

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);

		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
				 pkt_mb2);
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
				 pkt_mb1);
		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
		/* C.4 calc available number of desc */
		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
		nb_pkts_recd += var;
		if (likely(var != RTE_I40E_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
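	/* the mask-based wrap above relies on nb_rx_desc being a power of two */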
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/*
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
uint16_t
i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/**
 * vPMD receive routine that reassembles a single burst of 32 scattered packets
 *
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
i40e_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts)
{
	struct i40e_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned i = 0;

	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 */
uint16_t
i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
		uint16_t burst;

		burst = i40e_recv_scattered_burst_vec(rx_queue,
						      rx_pkts + retval,
						      RTE_I40E_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < RTE_I40E_VPMD_RX_BURST)
			return retval;
	}

	return retval + i40e_recv_scattered_burst_vec(rx_queue,
						      rx_pkts + retval,
						      nb_pkts);
}

static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
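	/* build the 16B data descriptor in a single store: the high quadword
	 * holds the descriptor type, command flags and buffer size, the low
	 * quadword holds the buffer's DMA address
	 */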
	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

	__m128i descriptor = _mm_set_epi64x(high_qw,
				pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}

static inline void
vtx(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}

uint16_t
i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct i40e_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
	int i;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
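		/* the burst wraps past the end of the ring: fill descriptors
		 * up to the wrap point, requesting a report-status (RS)
		 * writeback on the last one before wrapping
		 */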
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
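		/* the burst crossed the tx_next_rs descriptor: set its RS bit
		 * so the NIC reports completion and i40e_tx_free_bufs() can
		 * reclaim the backlog later
		 */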
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
						I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	I40E_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}

void __rte_cold
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	_i40e_rx_queue_release_mbufs_vec(rxq);
}

int __rte_cold
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
	return i40e_rxq_vec_setup_default(rxq);
}

int __rte_cold
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
	return 0;
}

int __rte_cold
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	return i40e_rx_vec_dev_conf_condition_check_default(dev);
}