/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

#define IONIC_RX_RING_DOORBELL_STRIDE	(32 - 1)

/*********************************************************************
 *
 * TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm, *next;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	struct ionic_txq_comp *cq_desc;
	void **info;
	uint32_t comp_index = (uint32_t)-1;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		/* Prefetch the next 4 descriptors (not really useful here) */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		comp_index = cq_desc->comp_index;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	if (comp_index != (uint32_t)-1) {
		while (q->tail_idx != comp_index) {
			info = IONIC_INFO_PTR(q, q->tail_idx);

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				/* q desc info */
				rte_prefetch0(&q->info[q->tail_idx]);

			/*
			 * Note: you can just use rte_pktmbuf_free,
			 * but this loop is faster
			 */
			txm = info[0];
			while (txm != NULL) {
				next = txm->next;
				rte_pktmbuf_free_seg(txm);
				txm = next;
			}
		}
	}
}

void __rte_cold
ionic_dev_tx_queue_release(void *tx_queue)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_tx_stats *stats = &txq->stats;

	IONIC_PRINT_CALL();

	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
		txq->qcq.q.index, stats->packets, stats->tso);

	ionic_lif_txq_deinit(txq);

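	/* Release the queue/completion pair (qcq) memory */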
	ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_tx_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: it would be better to post a NOP Tx descriptor and wait
	 * for its completion before disabling the Tx queue
	 */

	ionic_qcq_disable(&txq->qcq);

	ionic_tx_flush(txq);

	return 0;
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
		ionic_dev_tx_queue_release(tx_queue);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
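 * On the first start the queue is initialized in hardware via
 * ionic_lif_txq_init(); subsequent starts simply re-enable it.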
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	if (!(txq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_txq_init(txq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(&txq->qcq);
	}

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
		struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
		struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	uint8_t flags = 0;
	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->len = len;
	desc->vlan_tci = vlan_tci;
	desc->hdr_len = hdrlen;
	desc->mss = mss;

	ionic_q_post(q, done, done ?
		txm : NULL);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

static int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
		bool not_xmit_more)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	seglen = hdrlen + mss;
	left = txm->data_len;
	data_iova = rte_mbuf_data_iova(txm);

	desc = ionic_tx_tso_next(txq, &elem);
	start = true;

	/* Chop data up into desc segments */

	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(data_iova + offset);
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done && not_xmit_more);
		desc = ionic_tx_tso_next(txq, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0) {
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = len;
				elem++;
				desc_nsge++;
			} else {
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done && not_xmit_more);
			desc =
				ionic_tx_tso_next(txq, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
		bool not_xmit_more)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	bool encap;
	bool has_vlan;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];

	if ((ol_flags & PKT_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & PKT_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & PKT_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->len = txm->data_len;
	desc->vlan_tci = txm->vlan_tci;

	elem = sg_desc_base[q->head_idx].elems;
	txm_seg = txm->next;
	while (txm_seg != NULL) {
		elem->len = txm_seg->data_len;
		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
		elem++;
		txm_seg = txm_seg->next;
	}

	ionic_q_post(q, not_xmit_more, txm);

	return 0;
}

uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	uint32_t next_q_head_idx;
	uint32_t bytes_tx = 0;
	uint16_t nb_tx = 0;
	int err;
	bool last;

	/* Free mbufs from any completed transmits */
	ionic_tx_flush(txq);

	if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
		stats->stop += nb_pkts;
		return 0;
	}

	while (nb_tx < nb_pkts) {
		last = (nb_tx == (nb_pkts - 1));

		next_q_head_idx = Q_NEXT_TO_POST(q, 1);
		if ((next_q_head_idx & 0x3) == 0) {
			struct ionic_txq_desc *desc_base = q->base;
			rte_prefetch0(&desc_base[next_q_head_idx]);
			rte_prefetch0(&q->info[next_q_head_idx]);
		}

		if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
			err = ionic_tx_tso(txq, tx_pkts[nb_tx], last);
		else
			err = ionic_tx(txq, tx_pkts[nb_tx], last);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			if (nb_tx > 0)
				ionic_q_flush(q);
			break;
		}

		bytes_tx += tx_pkts[nb_tx]->pkt_len;
		nb_tx++;
	}

	stats->packets += nb_tx;
	stats->bytes += bytes_tx;

	return nb_tx;
}

/*********************************************************************
 *
 * TX prep functions
 *
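 * ionic_prep_pkts() checks that the segment count and offload flags
 * of each mbuf are supported by the transmit path.
 *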
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (	\
	PKT_TX_IPV4 |		\
	PKT_TX_IPV6 |		\
	PKT_TX_VLAN |		\
	PKT_TX_IP_CKSUM |	\
	PKT_TX_TCP_SEG |	\
	PKT_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

uint16_t
ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS_V1 + 1) {
			rte_errno = EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}

/*********************************************************************
 *
 * RX functions
 *
 **********************************************************************/

static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf);

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *mbuf;
	void **info;

	while (q->tail_idx != q->head_idx) {
		info = IONIC_INFO_PTR(q, q->tail_idx);
		mbuf = info[0];
		rte_mempool_put(rxq->mb_pool, mbuf);

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
	}
}

void __rte_cold
ionic_dev_rx_queue_release(void *rx_queue)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_stats *stats;

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

	stats = &rxq->stats;

	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
		rxq->qcq.q.index, stats->packets, stats->mtods);

	ionic_rx_empty(rxq);

	ionic_lif_rxq_deinit(rxq);

	ionic_qcq_free(&rxq->qcq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
	    nb_desc < IONIC_MIN_RING_DESC ||
	    nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (range: %u..%u)",
			nb_desc, rx_queue_id,
			IONIC_MIN_RING_DESC, IONIC_MAX_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
		ionic_dev_rx_queue_release(rx_queue);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc,
		&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * DEV_RX_OFFLOAD_KEEP_CRC. Once the adapter is able to keep
	 * the CRC, account for ETHER_CRC_LEN and subtract it from the
	 * length of all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *     DEV_RX_OFFLOAD_KEEP_CRC)
	 *	rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}

static __rte_always_inline void
ionic_rx_clean(struct ionic_rx_qcq *rxq,
		uint32_t q_desc_index, uint32_t cq_desc_index,
		void *service_cb_arg)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct rte_mbuf *rxm, *rxm_seg;
	uint32_t max_frame_size =
		rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	struct ionic_rx_stats *stats = &rxq->stats;
	struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
		service_cb_arg;
	uint32_t buf_size = (uint16_t)
		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);
	uint32_t left;
	void **info;

	assert(q_desc_index == cq_desc->comp_index);

	info = IONIC_INFO_PTR(q, cq_desc->comp_index);

	rxm = info[0];

	if (!recv_args) {
		stats->no_cb_arg++;
		/* Flush */
		rte_pktmbuf_free(rxm);
		/*
		 * Note: rte_mempool_put is faster with no segs
		 * rte_mempool_put(rxq->mb_pool, rxm);
		 */
		return;
	}

	if (cq_desc->status) {
		stats->bad_cq_status++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (recv_args->nb_rx >= recv_args->nb_pkts) {
		stats->no_room++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (cq_desc->len > max_frame_size ||
	    cq_desc->len == 0) {
		stats->bad_len++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	rxm->data_off = RTE_PKTMBUF_HEADROOM;
	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
	rxm->pkt_len = cq_desc->len;
	rxm->port = rxq->qcq.lif->port_id;

	left = cq_desc->len;

	rxm->data_len = RTE_MIN(buf_size, left);
	left -= rxm->data_len;

	rxm_seg = rxm->next;
	while (rxm_seg && left) {
		rxm_seg->data_len = RTE_MIN(buf_size, left);
		left -= rxm_seg->data_len;

		rxm_seg = rxm_seg->next;
		rxm->nb_segs++;
	}

	/* RSS */
	pkt_flags |= PKT_RX_RSS_HASH;
	rxm->hash.rss = cq_desc->rss_hash;

	/* Vlan Strip */
	if (cq_desc->csum_flags &
	    IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxm->vlan_tci = cq_desc->vlan_tci;
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
			pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;

		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
		    (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
			pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if ((cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
			 (cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
			pkt_flags |= PKT_RX_L4_CKSUM_BAD;
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
	case IONIC_PKT_TYPE_IPV4:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		break;
	case IONIC_PKT_TYPE_IPV6:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		break;
	case IONIC_PKT_TYPE_IPV4_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV6_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV4_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
		break;
	case IONIC_PKT_TYPE_IPV6_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
		break;
	default:
		{
			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
			uint16_t ether_type = eth_h->ether_type;
			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
			else
				pkt_type = RTE_PTYPE_UNKNOWN;
			stats->mtods++;
			break;
		}
	}

	rxm->packet_type = pkt_type;

	recv_args->rx_pkts[recv_args->nb_rx] = rxm;
	recv_args->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}

static void
ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf)
{
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
	struct ionic_rxq_desc *new = &desc_base[q->head_idx];

	new->addr = old->addr;
	new->len = old->len;

	ionic_q_post(q, true, mbuf);
}

static __rte_always_inline int
ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	struct ionic_rxq_sg_elem *elem;
	rte_iova_t dma_addr;
	uint32_t i, j, nsegs, buf_size, size;
	bool ring_doorbell;

	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);

	/* Initialize software ring entries */
	for (i = ionic_q_space_avail(q); i; i--) {
		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
		struct rte_mbuf *prev_rxm_seg;

		if (rxm == NULL) {
			IONIC_PRINT(ERR, "RX mbuf alloc failed");
			return -ENOMEM;
		}

		nsegs = (len + buf_size - 1) / buf_size;

		desc = &desc_base[q->head_idx];
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
		desc->addr = dma_addr;
		desc->len = buf_size;
		size = buf_size;
		desc->opcode = (nsegs > 1) ?
			IONIC_RXQ_DESC_OPCODE_SG :
			IONIC_RXQ_DESC_OPCODE_SIMPLE;
		rxm->next = NULL;

		prev_rxm_seg = rxm;
		sg_desc = &sg_desc_base[q->head_idx];
		elem = sg_desc->elems;
		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
			struct rte_mbuf *rxm_seg;
			rte_iova_t data_iova;

			rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (rxm_seg == NULL) {
				IONIC_PRINT(ERR, "RX mbuf alloc failed");
				return -ENOMEM;
			}

			data_iova = rte_mbuf_data_iova(rxm_seg);
			dma_addr = rte_cpu_to_le_64(data_iova);
			elem->addr = dma_addr;
			elem->len = buf_size;
			size += buf_size;
			elem++;
			rxm_seg->next = NULL;
			prev_rxm_seg->next = rxm_seg;
			prev_rxm_seg = rxm_seg;
		}

		if (size < len)
			IONIC_PRINT(ERR,
				"Rx SG size is not sufficient (%d < %d)",
				size, len);

		/*
		 * Ring the doorbell once per
		 * IONIC_RX_RING_DOORBELL_STRIDE + 1 postings
		 */
		ring_doorbell = ((q->head_idx + 1) &
			IONIC_RX_RING_DOORBELL_STRIDE) == 0;

		ionic_q_post(q, ring_doorbell, rxm);
	}

	return 0;
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
		rx_queue_id, rxq->qcq.q.num_descs, frame_size);

	if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_rxq_init(rxq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(&rxq->qcq);
	}

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq, frame_size) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		void *service_cb_arg)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	bool more;
	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
	uint32_t work_done = 0;

	if (work_to_do == 0)
		return;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		curr_cq_tail_idx = cq->tail_idx;
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch the next 4 descriptors */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		do {
			more = (q->tail_idx != cq_desc->comp_index);

			curr_q_tail_idx = q->tail_idx;
			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				/* q desc info */
				rte_prefetch0(&q->info[q->tail_idx]);

			ionic_rx_clean(rxq, curr_q_tail_idx, curr_cq_tail_idx,
				service_cb_arg);

		} while (more);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct ionic_rx_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_qcq_disable(&rxq->qcq);

	/* Flush */
	ionic_rxq_service(rxq, -1, NULL);

	return 0;
}

uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	uint32_t frame_size =
		rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct ionic_rx_service service_cb_arg;

	service_cb_arg.rx_pkts = rx_pkts;
	service_cb_arg.nb_pkts = nb_pkts;
	service_cb_arg.nb_rx = 0;

	ionic_rxq_service(rxq, nb_pkts, &service_cb_arg);

	ionic_rx_fill(rxq, frame_size);

	return service_cb_arg.nb_rx;
}