/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_io.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_regs_ef10.h"

#include "sfc_debug.h"
#include "sfc_dp_tx.h"
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"
#include "sfc_tso.h"

#define sfc_ef10_tx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)

#define sfc_ef10_tx_info(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, INFO, dpq, __VA_ARGS__)

/** Maximum length of the DMA descriptor data */
#define SFC_EF10_TX_DMA_DESC_LEN_MAX \
	((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)

/**
 * Maximum number of descriptors/buffers in the Tx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF10 native datapath uses an event queue of the same size as the
 * Tx queue. The maximum number of events on the datapath can be estimated
 * as the number of Tx queue entries (one event per Tx buffer in the worst
 * case) plus Tx error and flush events.
 */
#define SFC_EF10_TXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Tx error */ - 1 /* flush */)
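
/*
 * For example, assuming SFC_EF10_EV_PER_CACHE_LINE is 8 (a 64-byte cache
 * line holding 8-byte events), a 1024-entry Tx queue yields
 *   SFC_EF10_TXQ_LIMIT(1024) = 1024 - 1 - 7 - 1 - 1 = 1014
 * usable descriptors, which keeps the equally sized event queue from
 * overflowing even if every Tx buffer produces its own event.
 */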

struct sfc_ef10_tx_sw_desc {
	struct rte_mbuf			*mbuf;
};

struct sfc_ef10_txq {
	unsigned int			flags;
#define SFC_EF10_TXQ_STARTED		0x1
#define SFC_EF10_TXQ_NOT_RUNNING	0x2
#define SFC_EF10_TXQ_EXCEPTION		0x4

	unsigned int			ptr_mask;
	unsigned int			added;
	unsigned int			completed;
	unsigned int			max_fill_level;
	unsigned int			free_thresh;
	unsigned int			evq_read_ptr;
	struct sfc_ef10_tx_sw_desc	*sw_ring;
	efx_qword_t			*txq_hw_ring;
	volatile void			*doorbell;
	efx_qword_t			*evq_hw_ring;
	uint8_t				*tsoh;
	rte_iova_t			tsoh_iova;
	uint16_t			tso_tcp_header_offset_limit;

	/* Datapath transmit queue anchor */
	struct sfc_dp_txq		dp;
};

static inline struct sfc_ef10_txq *
sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
{
	return container_of(dp_txq, struct sfc_ef10_txq, dp);
}

static bool
sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
{
	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;

	/*
	 * The exception flag is set when reap is done.
	 * Reap is never done twice per packet burst get, and the absence of
	 * the flag is checked on burst get entry.
	 */
	SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);

	*tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];

	if (!sfc_ef10_ev_present(*tx_ev))
		return false;

	if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_TX_EV)) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		txq->flags |= SFC_EF10_TXQ_EXCEPTION;
		sfc_ef10_tx_err(&txq->dp.dpq,
				"TxQ exception at EvQ read ptr %#x",
				txq->evq_read_ptr);
		return false;
	}

	txq->evq_read_ptr++;
	return true;
}

static unsigned int
sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq)
{
	const unsigned int curr_done = txq->completed - 1;
	unsigned int anew_done = curr_done;
	efx_qword_t tx_ev;

	while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC; software should
		 * never see it and, therefore, may ignore it.
		 */

		/* Update the latest done descriptor */
		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
	}
	return (anew_done - curr_done) & txq->ptr_mask;
}
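
/*
 * The subtraction above is performed modulo the ring size (ptr_mask + 1),
 * so it stays correct across index wrap-around: e.g. with ptr_mask 0x3ff,
 * curr_done 0x3fe and anew_done 0x2,
 * (0x2 - 0x3fe) & 0x3ff = 4 descriptors are reported as newly completed.
 */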

static void
sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
{
	const unsigned int old_read_ptr = txq->evq_read_ptr;
	const unsigned int ptr_mask = txq->ptr_mask;
	unsigned int completed = txq->completed;
	unsigned int pending = completed;

	pending += sfc_ef10_tx_process_events(txq);

	if (pending != completed) {
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
		unsigned int nb = 0;

		do {
			struct sfc_ef10_tx_sw_desc *txd;
			struct rte_mbuf *m;

			txd = &txq->sw_ring[completed & ptr_mask];
			if (txd->mbuf == NULL)
				continue;

			m = rte_pktmbuf_prefree_seg(txd->mbuf);
			txd->mbuf = NULL;
			if (m == NULL)
				continue;

			if ((nb == RTE_DIM(bulk)) ||
			    ((nb != 0) && (m->pool != bulk[0]->pool))) {
				rte_mempool_put_bulk(bulk[0]->pool,
						     (void *)bulk, nb);
				nb = 0;
			}

			bulk[nb++] = m;
		} while (++completed != pending);

		if (nb != 0)
			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;
	}

	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
			   txq->evq_read_ptr);
}

static void
sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
			     efx_qword_t *edp)
{
	EFX_POPULATE_QWORD_4(*edp,
			     ESF_DZ_TX_KER_TYPE, 0,
			     ESF_DZ_TX_KER_CONT, !eop,
			     ESF_DZ_TX_KER_BYTE_CNT, size,
			     ESF_DZ_TX_KER_BUF_ADDR, addr);
}
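
/*
 * For example, a two-segment packet is described by two such descriptors:
 * the first one built with eop == false (so ESF_DZ_TX_KER_CONT is set) and
 * the second one with eop == true, which completes the packet.
 */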

static void
sfc_ef10_tx_qdesc_tso2_create(struct sfc_ef10_txq * const txq,
			      unsigned int added, uint16_t ipv4_id,
			      uint16_t outer_ipv4_id, uint32_t tcp_seq,
			      uint16_t tcp_mss)
{
	EFX_POPULATE_QWORD_5(txq->txq_hw_ring[added & txq->ptr_mask],
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_TSO,
			     ESF_DZ_TX_TSO_OPTION_TYPE,
			     ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
			     ESF_DZ_TX_TSO_IP_ID, ipv4_id,
			     ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
	EFX_POPULATE_QWORD_5(txq->txq_hw_ring[(added + 1) & txq->ptr_mask],
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_TSO,
			     ESF_DZ_TX_TSO_OPTION_TYPE,
			     ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
			     ESF_DZ_TX_TSO_TCP_MSS, tcp_mss,
			     ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id);
}

static inline void
sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
		  unsigned int pushed)
{
	efx_qword_t desc;
	efx_oword_t oword;

	/*
	 * This improves performance by pushing a TX descriptor at the same
	 * time as the doorbell. The descriptor must be added to the TXQ,
	 * so that it can be used if the hardware decides not to use the
	 * pushed descriptor.
	 */
	desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
	EFX_POPULATE_OWORD_3(oword,
		ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
		ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
		ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

	/* DMA sync to device is not required */

	/*
	 * rte_io_wmb() guarantees that the STORE operations
	 * (i.e. Tx and event descriptor updates) that precede
	 * the rte_io_wmb() call are visible to the NIC before the STORE
	 * operations that follow it (i.e. the doorbell write).
	 */
	rte_io_wmb();

	*(volatile efsys_uint128_t *)txq->doorbell = oword.eo_u128[0];
}

static unsigned int
sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
{
	unsigned int extra_descs_per_seg;
	unsigned int extra_descs_per_pkt;

	/*
	 * VLAN offload is not supported yet, so no extra descriptors
	 * are required for the VLAN option descriptor.
	 */

	/** Maximum length of the mbuf segment data */
#define SFC_MBUF_SEG_LEN_MAX		UINT16_MAX
	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);

	/*
	 * Each segment is already counted once below. So, calculate
	 * how many extra DMA descriptors may be required per segment in
	 * the worst case because of the maximum DMA descriptor length limit.
	 * If the maximum segment length is less than or equal to the maximum
	 * DMA descriptor length, no extra DMA descriptors are required.
	 */
	extra_descs_per_seg =
		(SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX;

	/** Maximum length of the packet */
#define SFC_MBUF_PKT_LEN_MAX		UINT32_MAX
	RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4);

	/*
	 * One more limitation on the maximum number of extra DMA descriptors
	 * comes from slicing the entire packet because of the DMA descriptor
	 * length limit, taking into account that there is at least one
	 * segment which is already counted below (hence the maximum packet
	 * length minus one is divided with rounding down).
	 * TSO is not supported yet, so the packet length is limited by the
	 * maximum PDU size.
	 */
	extra_descs_per_pkt =
		(RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
			 SFC_MBUF_PKT_LEN_MAX) - 1) /
		SFC_EF10_TX_DMA_DESC_LEN_MAX;

	return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg,
				    extra_descs_per_pkt);
}
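
/*
 * A rough worked example (the exact values depend on the efx/EF10
 * definitions): with a 14-bit byte count field the DMA descriptor limit is
 * 16383 bytes, so a 65535-byte mbuf segment may need up to 4 extra
 * descriptors, while a packet bounded by an ~9 KB PDU needs no extra
 * descriptors at all and the estimate degenerates to m->nb_segs.
 */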

static bool
sfc_ef10_try_reap(struct sfc_ef10_txq * const txq, unsigned int added,
		  unsigned int needed_desc, unsigned int *dma_desc_space,
		  bool *reap_done)
{
	if (*reap_done)
		return false;

	if (added != txq->added) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

	sfc_ef10_tx_reap(txq);
	*reap_done = true;

	/*
	 * Recalculate DMA descriptor space since Tx reap may change
	 * the number of completed descriptors
	 */
	*dma_desc_space = txq->max_fill_level -
		(added - txq->completed);

	return (needed_desc <= *dma_desc_space);
}
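
/*
 * For instance, if max_fill_level is 1014 and 1000 descriptors are
 * outstanding (added - completed == 1000), only 14 descriptors are free;
 * if the reap above advances txq->completed by 500, the recalculated
 * space grows to 514 and a request for, say, 20 descriptors succeeds.
 */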

static uint16_t
sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		      uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *m = tx_pkts[i];
		int ret;

#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
		/*
		 * In the non-TSO case, check that packet segments do not
		 * exceed the size limit. Perform the check in debug mode
		 * since an MTU greater than 9k is not supported, but the
		 * limit here is 16k-1.
		 */
		if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
			struct rte_mbuf *m_seg;

			for (m_seg = m; m_seg != NULL; m_seg = m_seg->next) {
				if (m_seg->data_len >
				    SFC_EF10_TX_DMA_DESC_LEN_MAX) {
					rte_errno = EINVAL;
					break;
				}
			}
		}
#endif
		ret = sfc_dp_tx_prepare_pkt(m, 0, SFC_TSOH_STD_LEN,
				txq->tso_tcp_header_offset_limit,
				txq->max_fill_level,
				SFC_EF10_TSO_OPT_DESCS_NUM, 0);
		if (unlikely(ret != 0)) {
			rte_errno = ret;
			break;
		}
	}

	return i;
}
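
/*
 * Illustration only (not part of the datapath): an application normally
 * exercises this prepare/burst callback pair through the generic ethdev
 * API, e.g.
 *
 *	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
 *					      pkts, nb_pkts);
 *	if (nb_prep != nb_pkts)
 *		handle_bad_pkt(pkts[nb_prep], rte_errno);
 *	(void)rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * where port_id, queue_id, pkts and handle_bad_pkt() are application-side
 * names, and rte_errno reports why a packet was rejected.
 */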

static int
sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
		      unsigned int *added, unsigned int *dma_desc_space,
		      bool *reap_done)
{
	size_t iph_off = ((m_seg->ol_flags & PKT_TX_TUNNEL_MASK) ?
			  m_seg->outer_l2_len + m_seg->outer_l3_len : 0) +
			 m_seg->l2_len;
	size_t tcph_off = iph_off + m_seg->l3_len;
	size_t header_len = tcph_off + m_seg->l4_len;
	/* Offset of the payload in the last segment that contains the header */
	size_t in_off = 0;
	const struct rte_tcp_hdr *th;
	uint16_t packet_id = 0;
	uint16_t outer_packet_id = 0;
	uint32_t sent_seq;
	uint8_t *hdr_addr;
	rte_iova_t hdr_iova;
	struct rte_mbuf *first_m_seg = m_seg;
	unsigned int pkt_start = *added;
	unsigned int needed_desc;
	struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
	bool eop;

	/*
	 * Preliminary estimation of required DMA descriptors, including extra
	 * descriptor for TSO header that is needed when the header is
	 * separated from payload in one segment. It does not include
	 * extra descriptors that may appear when a big segment is split across
	 * several descriptors.
	 */
	needed_desc = m_seg->nb_segs +
			(unsigned int)SFC_EF10_TSO_OPT_DESCS_NUM +
			(unsigned int)SFC_EF10_TSO_HDR_DESCS_NUM;
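
	/*
	 * For example, assuming SFC_EF10_TSO_OPT_DESCS_NUM is 2 (FATSOv2
	 * option descriptors) and SFC_EF10_TSO_HDR_DESCS_NUM is 1, a
	 * three-segment TSO packet is initially estimated at 3 + 2 + 1 = 6
	 * descriptors; the estimate is reduced below if the header does not
	 * need its own descriptor and grows if a segment has to be split.
	 */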

	if (needed_desc > *dma_desc_space &&
	    !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
			       dma_desc_space, reap_done)) {
		/*
		 * If a future Tx reap may increase available DMA descriptor
		 * space, do not try to send the packet.
		 */
		if (txq->completed != pkt_start)
			return ENOSPC;
		/*
		 * Do not allow sending the packet if the maximum DMA
		 * descriptor space is not sufficient to hold TSO
		 * descriptors, the header descriptor and at least 1
		 * segment descriptor.
		 */
		if (*dma_desc_space < SFC_EF10_TSO_OPT_DESCS_NUM +
				SFC_EF10_TSO_HDR_DESCS_NUM + 1)
			return EMSGSIZE;
	}

	/* Check if the header is not fragmented */
	if (rte_pktmbuf_data_len(m_seg) >= header_len) {
		hdr_addr = rte_pktmbuf_mtod(m_seg, uint8_t *);
		hdr_iova = rte_mbuf_data_iova(m_seg);
		if (rte_pktmbuf_data_len(m_seg) == header_len) {
			/* Cannot send a packet that consists only of header */
			if (unlikely(m_seg->next == NULL))
				return EMSGSIZE;
			/*
			 * Associate header mbuf with header descriptor
			 * which is located after TSO descriptors.
			 */
			txq->sw_ring[(pkt_start + SFC_EF10_TSO_OPT_DESCS_NUM) &
				     txq->ptr_mask].mbuf = m_seg;
			m_seg = m_seg->next;
			in_off = 0;

			/*
			 * If there is no payload offset (payload starts at the
			 * beginning of a segment) then an extra descriptor for
			 * separated header is not needed.
			 */
			needed_desc--;
		} else {
			in_off = header_len;
		}
	} else {
		unsigned int copied_segs;
		unsigned int hdr_addr_off = (*added & txq->ptr_mask) *
				SFC_TSOH_STD_LEN;

		/*
		 * Discard a packet if header linearization is needed but
		 * the header is too big.
		 * Duplicate the Tx prepare check here to avoid memory
		 * corruption if Tx prepare is skipped.
		 */
		if (unlikely(header_len > SFC_TSOH_STD_LEN))
			return EMSGSIZE;

		hdr_addr = txq->tsoh + hdr_addr_off;
		hdr_iova = txq->tsoh_iova + hdr_addr_off;
		copied_segs = sfc_tso_prepare_header(hdr_addr, header_len,
						     &m_seg, &in_off);

		/* Cannot send a packet that consists only of header */
		if (unlikely(m_seg == NULL))
			return EMSGSIZE;

		m_seg_to_free_up_to = m_seg;
		/*
		 * Reduce the number of needed descriptors by the number of
		 * segments that entirely consist of header data.
		 */
		needed_desc -= copied_segs;

		/* Extra descriptor for separated header is not needed */
		if (in_off == 0)
			needed_desc--;
	}

	/*
	 * Tx prepare has debug-only checks that offload flags are correctly
	 * filled in the TSO mbuf. Use zero IPID if there is no IPv4 flag.
	 * If the packet is still IPv4, HW will simply start from zero IPID.
	 */
	if (first_m_seg->ol_flags & PKT_TX_IPV4)
		packet_id = sfc_tso_ip4_get_ipid(hdr_addr, iph_off);

	if (first_m_seg->ol_flags & PKT_TX_OUTER_IPV4)
		outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr,
						first_m_seg->outer_l2_len);

	th = (const struct rte_tcp_hdr *)(hdr_addr + tcph_off);
	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
	sent_seq = rte_be_to_cpu_32(sent_seq);

	sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, outer_packet_id,
			sent_seq, first_m_seg->tso_segsz);
	(*added) += SFC_EF10_TSO_OPT_DESCS_NUM;

	sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
			&txq->txq_hw_ring[(*added) & txq->ptr_mask]);
	(*added)++;

	do {
		rte_iova_t next_frag = rte_mbuf_data_iova(m_seg);
		unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
		unsigned int id;

		next_frag += in_off;
		seg_len -= in_off;
		in_off = 0;

		do {
			rte_iova_t frag_addr = next_frag;
			size_t frag_len;

			frag_len = RTE_MIN(seg_len,
					   SFC_EF10_TX_DMA_DESC_LEN_MAX);

			next_frag += frag_len;
			seg_len -= frag_len;

			eop = (seg_len == 0 && m_seg->next == NULL);

			id = (*added) & txq->ptr_mask;
			(*added)++;

			/*
			 * Initially we assume that one DMA descriptor is needed
			 * for every segment. When the segment is split across
			 * several DMA descriptors, increase the estimation.
			 */
			needed_desc += (seg_len != 0);

			/*
			 * No more descriptors can be added, but not all
			 * segments have been processed yet.
			 */
			if (*added - pkt_start == *dma_desc_space &&
			    !eop &&
			    !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
						dma_desc_space, reap_done)) {
				struct rte_mbuf *m;
				struct rte_mbuf *m_next;

				if (txq->completed != pkt_start) {
					unsigned int i;

					/*
					 * Reset mbuf associations with added
					 * descriptors.
					 */
					for (i = pkt_start; i != *added; i++) {
						id = i & txq->ptr_mask;
						txq->sw_ring[id].mbuf = NULL;
					}
					return ENOSPC;
				}

				/* Free the segments that cannot be sent */
				for (m = m_seg->next; m != NULL; m = m_next) {
					m_next = m->next;
					rte_pktmbuf_free_seg(m);
				}
				eop = true;
				/* Ignore the rest of the segment */
				seg_len = 0;
			}

			sfc_ef10_tx_qdesc_dma_create(frag_addr, frag_len,
					eop, &txq->txq_hw_ring[id]);

		} while (seg_len != 0);

		txq->sw_ring[id].mbuf = m_seg;

		m_seg = m_seg->next;
	} while (!eop);

	/*
	 * Free segments whose content was entirely copied to the TSO header
	 * memory space of the Tx queue
	 */
	for (m_seg = first_m_seg; m_seg != m_seg_to_free_up_to;) {
		struct rte_mbuf *seg_to_free = m_seg;

		m_seg = m_seg->next;
		rte_pktmbuf_free_seg(seg_to_free);
	}

	return 0;
}

static uint16_t
sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	added = txq->added;
	dma_desc_space = txq->max_fill_level - (added - txq->completed);

	reap_done = (dma_desc_space < txq->free_thresh);
	if (reap_done) {
		sfc_ef10_tx_reap(txq);
		dma_desc_space = txq->max_fill_level - (added - txq->completed);
	}

	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
	     pktp != pktp_end;
	     ++pktp) {
		struct rte_mbuf *m_seg = *pktp;
		unsigned int pkt_start = added;
		uint32_t pkt_len;

		if (likely(pktp + 1 != pktp_end))
			rte_mbuf_prefetch_part1(pktp[1]);

		if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
			int rc;

			rc = sfc_ef10_xmit_tso_pkt(txq, m_seg, &added,
					&dma_desc_space, &reap_done);
			if (rc != 0) {
				added = pkt_start;

				/* Packet can be sent in following xmit calls */
				if (likely(rc == ENOSPC))
					break;

				/*
				 * Packet cannot be sent. Tell RTE that
				 * it is sent, but actually drop it and
				 * continue with another packet.
				 */
				rte_pktmbuf_free(*pktp);
				continue;
			}

			goto dma_desc_space_update;
		}

		if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
			if (reap_done)
				break;

			/* Push already prepared descriptors before polling */
			if (added != txq->added) {
				sfc_ef10_tx_qpush(txq, added, txq->added);
				txq->added = added;
			}

			sfc_ef10_tx_reap(txq);
			reap_done = true;
			dma_desc_space = txq->max_fill_level -
				(added - txq->completed);
			if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
				break;
		}

		pkt_len = m_seg->pkt_len;
		do {
			rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
			unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
			unsigned int id = added & txq->ptr_mask;

			SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);

			pkt_len -= seg_len;

			sfc_ef10_tx_qdesc_dma_create(seg_addr,
				seg_len, (pkt_len == 0),
				&txq->txq_hw_ring[id]);

			/*
			 * rte_pktmbuf_free() is commonly used in DPDK for
			 * recycling packets - the function checks every
			 * segment's reference counter and returns the
			 * buffer to its pool whenever possible;
			 * nevertheless, freeing mbuf segments one by one
			 * may entail some performance decline;
			 * here, sfc_ef10_tx_reap() does the same job on
			 * its own and frees buffers in bulks (all mbufs
			 * within a bulk belong to the same pool);
			 * from this perspective, individual segment pointers
			 * must be associated with the corresponding SW
			 * descriptors independently so that only one loop
			 * is sufficient on reap to inspect all the buffers
			 */
			txq->sw_ring[id].mbuf = m_seg;

			++added;

		} while ((m_seg = m_seg->next) != 0);

dma_desc_space_update:
		dma_desc_space -= (added - pkt_start);
	}

	if (likely(added != txq->added)) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef10_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}

static void
sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
{
	const unsigned int old_read_ptr = txq->evq_read_ptr;
	const unsigned int ptr_mask = txq->ptr_mask;
	unsigned int completed = txq->completed;
	unsigned int pending = completed;

	pending += sfc_ef10_tx_process_events(txq);

	if (pending != completed) {
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
		unsigned int nb = 0;

		do {
			struct sfc_ef10_tx_sw_desc *txd;

			txd = &txq->sw_ring[completed & ptr_mask];

			if (nb == RTE_DIM(bulk)) {
				rte_mempool_put_bulk(bulk[0]->pool,
						     (void *)bulk, nb);
				nb = 0;
			}

			bulk[nb++] = txd->mbuf;
		} while (++completed != pending);

		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;
	}

	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
			   txq->evq_read_ptr);
}

#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
static uint16_t
sfc_ef10_simple_prepare_pkts(__rte_unused void *tx_queue,
			     struct rte_mbuf **tx_pkts,
			     uint16_t nb_pkts)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *m = tx_pkts[i];
		int ret;

		ret = rte_validate_tx_offload(m);
		if (unlikely(ret != 0)) {
			/*
			 * Negative error code is returned by
			 * rte_validate_tx_offload(), but positive error
			 * codes are used inside the net/sfc PMD.
			 */
			SFC_ASSERT(ret < 0);
			rte_errno = -ret;
			break;
		}

		/* ef10_simple does not support TSO and VLAN insertion */
		if (unlikely(m->ol_flags &
			     (PKT_TX_TCP_SEG | PKT_TX_VLAN_PKT))) {
			rte_errno = ENOTSUP;
			break;
		}

		/* ef10_simple does not support scattered packets */
		if (unlikely(m->nb_segs != 1)) {
			rte_errno = ENOTSUP;
			break;
		}

		/*
		 * ef10_simple requires fast-free which ignores reference
		 * counters
		 */
		if (unlikely(rte_mbuf_refcnt_read(m) != 1)) {
			rte_errno = ENOTSUP;
			break;
		}

		/* ef10_simple requires a single pool for all packets */
		if (unlikely(m->pool != tx_pkts[0]->pool)) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}
#endif

static uint16_t
sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	unsigned int ptr_mask;
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	ptr_mask = txq->ptr_mask;
	added = txq->added;
	dma_desc_space = txq->max_fill_level - (added - txq->completed);

	reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
	if (reap_done) {
		sfc_ef10_simple_tx_reap(txq);
		dma_desc_space = txq->max_fill_level - (added - txq->completed);
	}

	pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
	for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) {
		struct rte_mbuf *pkt = *pktp;
		unsigned int id = added & ptr_mask;

		SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
			   SFC_EF10_TX_DMA_DESC_LEN_MAX);

		sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
					     rte_pktmbuf_data_len(pkt),
					     true, &txq->txq_hw_ring[id]);

		txq->sw_ring[id].mbuf = pkt;

		++added;
	}

	if (likely(added != txq->added)) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef10_simple_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}

static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info;
static void
sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * Number of descriptors just defines maximum number of pushed
	 * descriptors (fill level).
	 */
	dev_info->tx_desc_lim.nb_min = 1;
	dev_info->tx_desc_lim.nb_align = 1;
}

static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
static int
sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
			   struct sfc_dp_tx_hw_limits *limits,
			   unsigned int *txq_entries,
			   unsigned int *evq_entries,
			   unsigned int *txq_max_fill_level)
{
	/*
	 * rte_ethdev API guarantees that the number meets min, max and
	 * alignment requirements.
	 */
	if (nb_tx_desc <= limits->txq_min_entries)
		*txq_entries = limits->txq_min_entries;
	else
		*txq_entries = rte_align32pow2(nb_tx_desc);

	*evq_entries = *txq_entries;

	*txq_max_fill_level = RTE_MIN(nb_tx_desc,
				      SFC_EF10_TXQ_LIMIT(*evq_entries));
	return 0;
}
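
/*
 * For example, a request for 1000 Tx descriptors with txq_min_entries of
 * 512 is rounded up to a 1024-entry Tx ring and event ring, and the fill
 * level is capped at min(1000, SFC_EF10_TXQ_LIMIT(1024)); assuming
 * SFC_EF10_EV_PER_CACHE_LINE is 8, that limit is 1014, so the fill level
 * stays at the requested 1000.
 */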

static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
static int
sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		    const struct rte_pci_addr *pci_addr, int socket_id,
		    const struct sfc_dp_tx_qcreate_info *info,
		    struct sfc_dp_txq **dp_txqp)
{
	struct sfc_ef10_txq *txq;
	int rc;

	rc = EINVAL;
	if (info->txq_entries != info->evq_entries)
		goto fail_bad_args;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
					 info->txq_entries,
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;

	if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
			      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
			      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
					      info->txq_entries,
					      SFC_TSOH_STD_LEN,
					      RTE_CACHE_LINE_SIZE,
					      socket_id);
		if (txq->tsoh == NULL)
			goto fail_tsoh_alloc;

		txq->tsoh_iova = rte_malloc_virt2iova(txq->tsoh);
	}

	txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
	txq->ptr_mask = info->txq_entries - 1;
	txq->max_fill_level = info->max_fill_level;
	txq->free_thresh = info->free_thresh;
	txq->txq_hw_ring = info->txq_hw_ring;
	txq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_TX_DESC_UPD_REG_OFST +
			(info->hw_index << info->vi_window_shift);
	txq->evq_hw_ring = info->evq_hw_ring;
	txq->tso_tcp_header_offset_limit = info->tso_tcp_header_offset_limit;

	sfc_ef10_tx_info(&txq->dp.dpq, "TxQ doorbell is %p", txq->doorbell);

	*dp_txqp = &txq->dp;
	return 0;

fail_tsoh_alloc:
	rte_free(txq->sw_ring);

fail_sw_ring_alloc:
	rte_free(txq);

fail_txq_alloc:
fail_bad_args:
	return rc;
}

static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
static void
sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	rte_free(txq->tsoh);
	rte_free(txq->sw_ring);
	rte_free(txq);
}

static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
static int
sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
		   unsigned int txq_desc_index)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	txq->evq_read_ptr = evq_read_ptr;
	txq->added = txq->completed = txq_desc_index;

	txq->flags |= SFC_EF10_TXQ_STARTED;
	txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);

	return 0;
}

static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
static void
sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;

	*evq_read_ptr = txq->evq_read_ptr;
}

static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
static bool
sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore Tx event since we reap all mbufs on
	 * queue purge anyway.
	 */

	return false;
}

static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
static void
sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
	unsigned int completed;

	for (completed = txq->completed; completed != txq->added; ++completed) {
		struct sfc_ef10_tx_sw_desc *txd;

		txd = &txq->sw_ring[completed & txq->ptr_mask];
		if (txd->mbuf != NULL) {
			rte_pktmbuf_free_seg(txd->mbuf);
			txd->mbuf = NULL;
		}
	}

	txq->flags &= ~SFC_EF10_TXQ_STARTED;
}

static unsigned int
sfc_ef10_tx_qdesc_npending(struct sfc_ef10_txq *txq)
{
	const unsigned int curr_done = txq->completed - 1;
	unsigned int anew_done = curr_done;
	efx_qword_t tx_ev;
	const unsigned int evq_old_read_ptr = txq->evq_read_ptr;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	while (sfc_ef10_tx_get_event(txq, &tx_ev))
		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);

	/*
	 * The function does not process events, so return event queue read
	 * pointer to the original position to allow the events that were
	 * read to be processed later
	 */
	txq->evq_read_ptr = evq_old_read_ptr;

	return (anew_done - curr_done) & txq->ptr_mask;
}

static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
static int
sfc_ef10_tx_qdesc_status(struct sfc_dp_txq *dp_txq,
			 uint16_t offset)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
	unsigned int npending = sfc_ef10_tx_qdesc_npending(txq);

	if (unlikely(offset > txq->ptr_mask))
		return -EINVAL;

	if (unlikely(offset >= txq->max_fill_level))
		return RTE_ETH_TX_DESC_UNAVAIL;

	if (unlikely(offset < npending))
		return RTE_ETH_TX_DESC_FULL;

	return RTE_ETH_TX_DESC_DONE;
}

struct sfc_dp_tx sfc_ef10_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10,
		.type		= SFC_DP_TX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
	},
	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
	.dev_offload_capa	= DEV_TX_OFFLOAD_MULTI_SEGS,
	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
				  DEV_TX_OFFLOAD_UDP_CKSUM |
				  DEV_TX_OFFLOAD_TCP_CKSUM |
				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				  DEV_TX_OFFLOAD_TCP_TSO |
				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
	.get_dev_info		= sfc_ef10_get_dev_info,
	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
	.qcreate		= sfc_ef10_tx_qcreate,
	.qdestroy		= sfc_ef10_tx_qdestroy,
	.qstart			= sfc_ef10_tx_qstart,
	.qtx_ev			= sfc_ef10_tx_qtx_ev,
	.qstop			= sfc_ef10_tx_qstop,
	.qreap			= sfc_ef10_tx_qreap,
	.qdesc_status		= sfc_ef10_tx_qdesc_status,
	.pkt_prepare		= sfc_ef10_prepare_pkts,
	.pkt_burst		= sfc_ef10_xmit_pkts,
};

struct sfc_dp_tx sfc_ef10_simple_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10_SIMPLE,
		.type		= SFC_DP_TX,
	},
	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
	.dev_offload_capa	= DEV_TX_OFFLOAD_MBUF_FAST_FREE,
	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
				  DEV_TX_OFFLOAD_UDP_CKSUM |
				  DEV_TX_OFFLOAD_TCP_CKSUM |
				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM,
	.get_dev_info		= sfc_ef10_get_dev_info,
	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
	.qcreate		= sfc_ef10_tx_qcreate,
	.qdestroy		= sfc_ef10_tx_qdestroy,
	.qstart			= sfc_ef10_tx_qstart,
	.qtx_ev			= sfc_ef10_tx_qtx_ev,
	.qstop			= sfc_ef10_tx_qstop,
	.qreap			= sfc_ef10_tx_qreap,
	.qdesc_status		= sfc_ef10_tx_qdesc_status,
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
	.pkt_prepare		= sfc_ef10_simple_prepare_pkts,
#endif
	.pkt_burst		= sfc_ef10_simple_xmit_pkts,
};