/*-
 * BSD LICENSE
 *
 * Copyright 2017 NXP.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of NXP nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include "dpaa2_eventdev.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Clarifications
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */
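
/*
 * Illustrative usage sketch (not part of the driver): a minimal view of how
 * an application is expected to drive this PMD through the generic eventdev
 * API. The calls are the standard rte_eventdev ones; the device/queue/port
 * ids, priority and default (NULL) configurations below are placeholders.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config dev_conf;
 *	uint8_t dev_id = 0, qid = 0, pid = 0, prio = 0;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	rte_event_dev_configure(dev_id, &dev_conf);
 *	rte_event_queue_setup(dev_id, qid, NULL);	(backed by a DPCON)
 *	rte_event_port_setup(dev_id, pid, NULL);	(backed by a DPIO)
 *	rte_event_port_link(dev_id, pid, &qid, &prio, 1);
 *	rte_event_dev_start(dev_id);
 */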

static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct rte_eventdev *ev_dev =
			((struct dpaa2_io_portal_t *)port)->eventdev;
	struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
	uint32_t queue_id = ev[0].queue_id;
	struct evq_info_t *evq_info = &priv->evq_info[queue_id];
	uint32_t fqid;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int ret;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			PMD_DRV_LOG(ERR, "Failure in affining portal\n");
			return 0;
		}
	}

	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_events) {
		frames_to_send = (nb_events >> 3) ?
			MAX_TX_RING_SLOTS : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

			if (event->impl_opaque) {
				/* An atomic context is held in the DQRR; ask
				 * QBMAN to consume that DQRR entry as part of
				 * this enqueue and drop the software record.
				 */
				uint8_t dqrr_index = event->impl_opaque - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DPIO->dqrr_size--;
				DPAA2_PER_LCORE_DPIO->dqrr_held &=
					~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * todo - need to align with hw context data
			 * to avoid copy
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
				sizeof(struct rte_event), 0);

			if (!ev_temp) {
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				PMD_DRV_LOG(ERR, "Unable to allocate memory");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
		}
		num_tx += frames_to_send;
		nb_events -= frames_to_send;
	}

	return num_tx;
}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;
	int ret, i = 0;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);

RETRY:
	ret = epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
			 &epoll_ev, 1, timeout_ticks);
	if (ret < 1) {
		/* epoll_wait can fail with errno set to EINTR because of
		 * spurious interrupts; retry the wait in that case.
		 */
		if (errno == EINTR) {
			PMD_DRV_LOG(DEBUG, "epoll_wait fails\n");
			if (i++ > 10)
				PMD_DRV_LOG(DEBUG, "Dequeue burst Failed\n");
			goto RETRY;
		}
	}
}

static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}

static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	/* The DQRR entry is not consumed here; its index is carried in
	 * impl_opaque so the atomic context is released either on the next
	 * enqueue (via DCA) or on the next dequeue burst.
	 */
	ev->impl_opaque = dqrr_index + 1;
	DPAA2_PER_LCORE_DPIO->dqrr_size++;
	DPAA2_PER_LCORE_DPIO->dqrr_held |= 1 << dqrr_index;
}

static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0;

	RTE_SET_USED(port);

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			PMD_DRV_LOG(ERR, "Failure in affining portal\n");
			return 0;
		}
	}

	swp = DPAA2_PER_LCORE_PORTAL;

	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DPIO->dqrr_size) {
		if (DPAA2_PER_LCORE_DPIO->dqrr_held & (1 << i)) {
			dq = qbman_get_dqrr_from_idx(swp, i);
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_PER_LCORE_DPIO->dqrr_size--;
		}
		i++;
	}
	DPAA2_PER_LCORE_DPIO->dqrr_held = 0;

	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}

		fd = qbman_result_DQ_fd(dq);

		rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			PMD_DRV_LOG(ERR, "Null Return VQ received\n");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	PMD_DRV_FUNC_TRACE();

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = RTE_MAX_LCORE;
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	PMD_DRV_FUNC_TRACE();

	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
				    RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct evq_info_t *evq_info =
		&priv->evq_info[queue_id];

	PMD_DRV_FUNC_TRACE();

	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;

	return 0;
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold =
		DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(port);
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	if (!dpaa2_io_portal[port_id].dpio_dev) {
		dpaa2_io_portal[port_id].dpio_dev =
				dpaa2_get_qbman_swp(port_id);
		if (!dpaa2_io_portal[port_id].dpio_dev)
			return -1;
		rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
	}

	dpaa2_io_portal[port_id].eventdev = dev;
	dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
	return 0;
}

static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_io_portal_t *dpaa2_portal = port;
	struct evq_info_t *evq_info;
	int i;

	PMD_DRV_FUNC_TRACE();

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &priv->evq_info[queues[i]];
		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
					0, dpaa2_portal->dpio_dev->token,
					evq_info->dpcon->dpcon_id);
		evq_info->link = 0;
	}

	return (int)nb_unlinks;
}

static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_io_portal_t *dpaa2_portal = port;
	struct evq_info_t *evq_info;
	uint8_t channel_index;
	int ret, i, n;

	PMD_DRV_FUNC_TRACE();

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];
		if (evq_info->link)
			continue;

		ret = dpio_add_static_dequeue_channel(
				dpaa2_portal->dpio_dev->dpio,
				CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
				evq_info->dpcon->dpcon_id, &channel_index);
		if (ret < 0) {
			PMD_DRV_ERR("Static dequeue cfg failed with ret: %d\n",
				    ret);
			goto err;
		}

		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
		evq_info->link = 1;
	}

	RTE_SET_USED(priorities);

	return (int)nb_links;
err:
	for (n = 0; n < i; n++) {
		evq_info = &priv->evq_info[queues[n]];
		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
					0, dpaa2_portal->dpio_dev->token,
					evq_info->dpcon->dpcon_id);
		evq_info->link = 0;
	}
	return ret;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	*timeout_ticks = ns * scale;

	return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int i, ret;

	PMD_DRV_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
					      dpcon_id, queue_conf);
		if (ret) {
			PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
				    ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int ret;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
				      dpcon_id, queue_conf);
	if (ret) {
		PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
				 const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
				    ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get = dpaa2_eventdev_info_get,
	.dev_configure = dpaa2_eventdev_configure,
	.dev_start = dpaa2_eventdev_start,
	.dev_stop = dpaa2_eventdev_stop,
	.dev_close = dpaa2_eventdev_close,
	.queue_def_conf = dpaa2_eventdev_queue_def_conf,
	.queue_setup = dpaa2_eventdev_queue_setup,
	.queue_release = dpaa2_eventdev_queue_release,
	.port_def_conf = dpaa2_eventdev_port_def_conf,
	.port_setup = dpaa2_eventdev_port_setup,
	.port_release = dpaa2_eventdev_port_release,
	.port_link = dpaa2_eventdev_port_link,
	.port_unlink = dpaa2_eventdev_port_unlink,
	.timeout_ticks = dpaa2_eventdev_timeout_ticks,
	.dump = dpaa2_eventdev_dump,
	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
};

static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Do settings to get the frame on a DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
			       DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "set_rx_q failed with err code: %d", ret);
			return ret;
		}
	}
	return 0;
}

static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops = &dpaa2_eventdev_ops;
	eventdev->enqueue = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "dpci setup failed with err code: %d", ret);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Initializing %s", name);
	return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);