/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _ICE_RXTX_VEC_COMMON_H_
#define _ICE_RXTX_VEC_COMMON_H_

#include "ice_rxtx.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

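/**
 * Reassemble scattered mbufs into complete multi-segment packets.
 *
 * Walks the burst of received buffers, chaining the segments of split
 * packets onto the pending chain held in rxq->pkt_first_seg/pkt_last_seg,
 * stripping the CRC from the tail segment, and compacting the finished
 * packets back into rx_bufs. Returns the number of complete packets.
 */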
static inline uint16_t
ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
                          uint16_t nb_bufs, uint8_t *split_flags)
{
        struct rte_mbuf *pkts[ICE_VPMD_RX_BURST] = {0}; /* finished pkts */
        struct rte_mbuf *start = rxq->pkt_first_seg;
        struct rte_mbuf *end = rxq->pkt_last_seg;
        unsigned int pkt_idx, buf_idx;

        for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
                if (end) {
                        /* processing a split packet */
                        end->next = rx_bufs[buf_idx];
                        rx_bufs[buf_idx]->data_len += rxq->crc_len;

                        start->nb_segs++;
                        start->pkt_len += rx_bufs[buf_idx]->data_len;
                        end = end->next;

                        if (!split_flags[buf_idx]) {
                                /* it's the last packet of the set */
                                start->hash = end->hash;
                                start->vlan_tci = end->vlan_tci;
                                start->ol_flags = end->ol_flags;
                                /* we need to strip crc for the whole packet */
                                start->pkt_len -= rxq->crc_len;
                                if (end->data_len > rxq->crc_len) {
                                        end->data_len -= rxq->crc_len;
                                } else {
                                        /* free up last mbuf */
                                        struct rte_mbuf *secondlast = start;

                                        start->nb_segs--;
                                        while (secondlast->next != end)
                                                secondlast = secondlast->next;
                                        secondlast->data_len -= (rxq->crc_len -
                                                                 end->data_len);
                                        secondlast->next = NULL;
                                        rte_pktmbuf_free_seg(end);
                                }
                                pkts[pkt_idx++] = start;
                                start = NULL;
                                end = NULL;
                        }
                } else {
                        /* not processing a split packet */
                        if (!split_flags[buf_idx]) {
                                /* not a split packet, save and skip */
                                pkts[pkt_idx++] = rx_bufs[buf_idx];
                                continue;
                        }
                        start = rx_bufs[buf_idx];
                        end = start;
                        rx_bufs[buf_idx]->data_len += rxq->crc_len;
                        rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
                }
        }

        /* save the partial packet for next time */
        rxq->pkt_first_seg = start;
        rxq->pkt_last_seg = end;
        rte_memcpy(rx_bufs, pkts, pkt_idx * sizeof(*pkts));
        return pkt_idx;
}

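/**
 * Free up to tx_rs_thresh transmitted mbufs from the Tx software ring.
 *
 * Checks the DD bit on the threshold descriptor first and returns 0 if the
 * hardware has not finished with that slice of the ring. Otherwise the mbufs
 * are returned to their mempool, bulk-freeing consecutive mbufs that come
 * from the same pool. Returns the number of freed descriptors.
 */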
static __rte_always_inline int
ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
{
        struct ice_tx_entry *txep;
        uint32_t n;
        uint32_t i;
        int nb_free = 0;
        struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];

        /* check DD bits on threshold descriptor */
        if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
             rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
            rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
                return 0;

        n = txq->tx_rs_thresh;

        /* first buffer to free from S/W ring is at index
         * tx_next_dd - (tx_rs_thresh - 1)
         */
        txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
        m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
        if (likely(m)) {
                free[0] = m;
                nb_free = 1;
                for (i = 1; i < n; i++) {
                        m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
                        if (likely(m)) {
                                if (likely(m->pool == free[0]->pool)) {
                                        free[nb_free++] = m;
                                } else {
                                        rte_mempool_put_bulk(free[0]->pool,
                                                             (void **)free,
                                                             nb_free);
                                        free[0] = m;
                                        nb_free = 1;
                                }
                        }
                }
                rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
        } else {
                for (i = 1; i < n; i++) {
                        m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
                        if (m)
                                rte_mempool_put(m->pool, m);
                }
        }

        /* buffers were freed, update counters */
        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
        txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
        if (txq->tx_next_dd >= txq->nb_tx_desc)
                txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

        return txq->tx_rs_thresh;
}

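/* Record the transmitted mbufs in the software ring so they can be freed
 * later by ice_tx_free_bufs_vec().
 */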
static __rte_always_inline void
ice_tx_backlog_entry(struct ice_tx_entry *txep,
                     struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        int i;

        for (i = 0; i < (int)nb_pkts; ++i)
                txep[i].mbuf = tx_pkts[i];
}

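/* Release all mbufs still held in the Rx software ring of a vector queue and
 * reset the ring entries to NULL. Only the range that has not yet been handed
 * back for rearming can contain valid mbufs.
 */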
static inline void
_ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
{
        const unsigned int mask = rxq->nb_rx_desc - 1;
        unsigned int i;

        if (unlikely(!rxq->sw_ring)) {
                PMD_DRV_LOG(DEBUG, "sw_ring is NULL");
                return;
        }

        if (rxq->rxrearm_nb >= rxq->nb_rx_desc)
                return;

        /* free all mbufs that are valid in the ring */
        if (rxq->rxrearm_nb == 0) {
                for (i = 0; i < rxq->nb_rx_desc; i++) {
                        if (rxq->sw_ring[i].mbuf)
                                rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
                }
        } else {
                for (i = rxq->rx_tail;
                     i != rxq->rxrearm_start;
                     i = (i + 1) & mask) {
                        if (rxq->sw_ring[i].mbuf)
                                rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
                }
        }

        rxq->rxrearm_nb = rxq->nb_rx_desc;

        /* set all entries to NULL */
        memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

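/* Release all mbufs still held in the Tx software ring of a vector queue.
 * The AVX512 transmit paths use a different software ring entry layout
 * (struct ice_vec_tx_entry), so those queues are walked with the matching
 * type.
 */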
static inline void
_ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
{
        uint16_t i;

        if (unlikely(!txq || !txq->sw_ring)) {
                PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
                return;
        }

        /**
         * vPMD tx will not set sw_ring's mbuf to NULL after free,
         * so we need to free the remaining mbufs more carefully.
         */
        i = txq->tx_next_dd - txq->tx_rs_thresh + 1;

#ifdef __AVX512VL__
        struct rte_eth_dev *dev = &rte_eth_devices[txq->vsi->adapter->pf.dev_data->port_id];

        if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
            dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
                struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;

                if (txq->tx_tail < i) {
                        for (; i < txq->nb_tx_desc; i++) {
                                rte_pktmbuf_free_seg(swr[i].mbuf);
                                swr[i].mbuf = NULL;
                        }
                        i = 0;
                }
                for (; i < txq->tx_tail; i++) {
                        rte_pktmbuf_free_seg(swr[i].mbuf);
                        swr[i].mbuf = NULL;
                }
        } else
#endif
        {
                if (txq->tx_tail < i) {
                        for (; i < txq->nb_tx_desc; i++) {
                                rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
                                txq->sw_ring[i].mbuf = NULL;
                        }
                        i = 0;
                }
                for (; i < txq->tx_tail; i++) {
                        rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
                        txq->sw_ring[i].mbuf = NULL;
                }
        }
}

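/* Build the 64-bit mbuf initializer template used by the vector Rx paths to
 * rearm descriptors: a zeroed mbuf with refcnt, nb_segs, data_off and port
 * pre-set, captured from the rearm_data area of a template mbuf.
 */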
static inline int
ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
{
        uintptr_t p;
        struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

        mb_def.nb_segs = 1;
        mb_def.data_off = RTE_PKTMBUF_HEADROOM;
        mb_def.port = rxq->port_id;
        rte_mbuf_refcnt_set(&mb_def, 1);

        /* prevent compiler reordering: rearm_data covers previous fields */
        rte_compiler_barrier();
        p = (uintptr_t)&mb_def.rearm_data;
        rxq->mbuf_initializer = *(uint64_t *)p;
        return 0;
}

#define ICE_TX_NO_VECTOR_FLAGS (                        \
                RTE_ETH_TX_OFFLOAD_MULTI_SEGS |         \
                RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |   \
                RTE_ETH_TX_OFFLOAD_TCP_TSO |            \
                RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)

#define ICE_TX_VECTOR_OFFLOAD (                         \
                RTE_ETH_TX_OFFLOAD_VLAN_INSERT |        \
                RTE_ETH_TX_OFFLOAD_QINQ_INSERT |        \
                RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |         \
                RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |         \
                RTE_ETH_TX_OFFLOAD_UDP_CKSUM |          \
                RTE_ETH_TX_OFFLOAD_TCP_CKSUM)

#define ICE_RX_VECTOR_OFFLOAD (                         \
                RTE_ETH_RX_OFFLOAD_CHECKSUM |           \
                RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |         \
                RTE_ETH_RX_OFFLOAD_VLAN |               \
                RTE_ETH_RX_OFFLOAD_RSS_HASH)

#define ICE_VECTOR_PATH         0
#define ICE_VECTOR_OFFLOAD_PATH 1

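/* Check whether a single Rx queue can use the vector path. Returns -1 if the
 * queue is not eligible, ICE_VECTOR_OFFLOAD_PATH if it needs the offload
 * flavour of the vector path, or ICE_VECTOR_PATH otherwise.
 */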
static inline int
ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
{
        if (!rxq)
                return -1;

        if (!rte_is_power_of_2(rxq->nb_rx_desc))
                return -1;

        if (rxq->rx_free_thresh < ICE_VPMD_RX_BURST)
                return -1;

        if (rxq->nb_rx_desc % rxq->rx_free_thresh)
                return -1;

        if (rxq->proto_xtr != PROTO_XTR_NONE)
                return -1;

        if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
                return -1;

        if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
                return ICE_VECTOR_OFFLOAD_PATH;

        return ICE_VECTOR_PATH;
}

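/* Check whether a single Tx queue can use the vector path. Returns -1 if the
 * queue is not eligible, ICE_VECTOR_OFFLOAD_PATH if it needs the offload
 * flavour of the vector path, or ICE_VECTOR_PATH otherwise.
 */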
static inline int
ice_tx_vec_queue_default(struct ice_tx_queue *txq)
{
        if (!txq)
                return -1;

        if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
            txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
                return -1;

        if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
                return -1;

        if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
                return ICE_VECTOR_OFFLOAD_PATH;

        return ICE_VECTOR_PATH;
}

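/* Check all Rx queues of the device: the vector path can only be selected if
 * every queue qualifies, and the offload path is chosen as soon as any queue
 * requires it.
 */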
static inline int
ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
        int i;
        struct ice_rx_queue *rxq;
        int ret = 0;
        int result = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                ret = ice_rx_vec_queue_default(rxq);
                if (ret < 0)
                        return -1;
                if (ret == ICE_VECTOR_OFFLOAD_PATH)
                        result = ret;
        }

        return result;
}

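/* Same device-wide check as above, applied to all Tx queues. */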
static inline int
ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
        int i;
        struct ice_tx_queue *txq;
        int ret = 0;
        int result = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                ret = ice_tx_vec_queue_default(txq);
                if (ret < 0)
                        return -1;
                if (ret == ICE_VECTOR_OFFLOAD_PATH)
                        result = ret;
        }

        return result;
}

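/* Translate a packet's mbuf offload flags into the command and offset fields
 * of Tx descriptor quad-word 1: L2/L3 header lengths, L3/L4 checksum commands
 * and VLAN tag insertion.
 */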
static inline void
ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
                       uint64_t *txd_hi)
{
        uint64_t ol_flags = tx_pkt->ol_flags;
        uint32_t td_cmd = 0;
        uint32_t td_offset = 0;

        /* Tx Checksum Offload */
        /* SET MACLEN */
        td_offset |= (tx_pkt->l2_len >> 1) <<
                     ICE_TX_DESC_LEN_MACLEN_S;

        /* Enable L3 checksum offload */
        if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
                td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
                td_offset |= (tx_pkt->l3_len >> 2) <<
                             ICE_TX_DESC_LEN_IPLEN_S;
        } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
                td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
                td_offset |= (tx_pkt->l3_len >> 2) <<
                             ICE_TX_DESC_LEN_IPLEN_S;
        } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
                td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
                td_offset |= (tx_pkt->l3_len >> 2) <<
                             ICE_TX_DESC_LEN_IPLEN_S;
        }

        /* Enable L4 checksum offloads */
        switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
        case RTE_MBUF_F_TX_TCP_CKSUM:
                td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
                td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
                             ICE_TX_DESC_LEN_L4_LEN_S;
                break;
        case RTE_MBUF_F_TX_SCTP_CKSUM:
                td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
                td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
                             ICE_TX_DESC_LEN_L4_LEN_S;
                break;
        case RTE_MBUF_F_TX_UDP_CKSUM:
                td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
                td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
                             ICE_TX_DESC_LEN_L4_LEN_S;
                break;
        default:
                break;
        }

        *txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;

        /* Tx VLAN/QINQ insertion Offload */
        if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
                td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
                *txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
                            ICE_TXD_QW1_L2TAG1_S);
        }

        *txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
}
#endif