1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2016 Cavium, Inc 3 */ 4 5 #include <ctype.h> 6 #include <stdio.h> 7 #include <stdlib.h> 8 #include <string.h> 9 #include <stdarg.h> 10 #include <errno.h> 11 #include <stdint.h> 12 #include <inttypes.h> 13 #include <sys/types.h> 14 #include <sys/queue.h> 15 16 #include <rte_byteorder.h> 17 #include <rte_log.h> 18 #include <rte_debug.h> 19 #include <rte_dev.h> 20 #include <rte_memory.h> 21 #include <rte_memcpy.h> 22 #include <rte_memzone.h> 23 #include <rte_eal.h> 24 #include <rte_per_lcore.h> 25 #include <rte_lcore.h> 26 #include <rte_atomic.h> 27 #include <rte_branch_prediction.h> 28 #include <rte_common.h> 29 #include <rte_malloc.h> 30 #include <rte_errno.h> 31 #include <rte_ethdev.h> 32 #include <rte_cryptodev.h> 33 #include <rte_cryptodev_pmd.h> 34 35 #include "rte_eventdev.h" 36 #include "rte_eventdev_pmd.h" 37 38 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS]; 39 40 struct rte_eventdev *rte_eventdevs = rte_event_devices; 41 42 static struct rte_eventdev_global eventdev_globals = { 43 .nb_devs = 0 44 }; 45 46 /* Event dev north bound API implementation */ 47 48 uint8_t 49 rte_event_dev_count(void) 50 { 51 return eventdev_globals.nb_devs; 52 } 53 54 int 55 rte_event_dev_get_dev_id(const char *name) 56 { 57 int i; 58 uint8_t cmp; 59 60 if (!name) 61 return -EINVAL; 62 63 for (i = 0; i < eventdev_globals.nb_devs; i++) { 64 cmp = (strncmp(rte_event_devices[i].data->name, name, 65 RTE_EVENTDEV_NAME_MAX_LEN) == 0) || 66 (rte_event_devices[i].dev ? (strncmp( 67 rte_event_devices[i].dev->driver->name, name, 68 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0); 69 if (cmp && (rte_event_devices[i].attached == 70 RTE_EVENTDEV_ATTACHED)) 71 return i; 72 } 73 return -ENODEV; 74 } 75 76 int 77 rte_event_dev_socket_id(uint8_t dev_id) 78 { 79 struct rte_eventdev *dev; 80 81 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 82 dev = &rte_eventdevs[dev_id]; 83 84 return dev->data->socket_id; 85 } 86 87 int 88 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info) 89 { 90 struct rte_eventdev *dev; 91 92 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 93 dev = &rte_eventdevs[dev_id]; 94 95 if (dev_info == NULL) 96 return -EINVAL; 97 98 memset(dev_info, 0, sizeof(struct rte_event_dev_info)); 99 100 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 101 (*dev->dev_ops->dev_infos_get)(dev, dev_info); 102 103 dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns; 104 105 dev_info->dev = dev->dev; 106 return 0; 107 } 108 109 int 110 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, 111 uint32_t *caps) 112 { 113 struct rte_eventdev *dev; 114 115 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 116 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL); 117 118 dev = &rte_eventdevs[dev_id]; 119 120 if (caps == NULL) 121 return -EINVAL; 122 *caps = 0; 123 124 return dev->dev_ops->eth_rx_adapter_caps_get ? 125 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev, 126 &rte_eth_devices[eth_port_id], 127 caps) 128 : 0; 129 } 130 131 int __rte_experimental 132 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps) 133 { 134 struct rte_eventdev *dev; 135 const struct rte_event_timer_adapter_ops *ops; 136 137 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 138 139 dev = &rte_eventdevs[dev_id]; 140 141 if (caps == NULL) 142 return -EINVAL; 143 *caps = 0; 144 145 return dev->dev_ops->timer_adapter_caps_get ? 146 (*dev->dev_ops->timer_adapter_caps_get)(dev, 147 0, 148 caps, 149 &ops) 150 : 0; 151 } 152 153 int __rte_experimental 154 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id, 155 uint32_t *caps) 156 { 157 struct rte_eventdev *dev; 158 struct rte_cryptodev *cdev; 159 160 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 161 if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) 162 return -EINVAL; 163 164 dev = &rte_eventdevs[dev_id]; 165 cdev = rte_cryptodev_pmd_get_dev(cdev_id); 166 167 if (caps == NULL) 168 return -EINVAL; 169 *caps = 0; 170 171 return dev->dev_ops->crypto_adapter_caps_get ? 172 (*dev->dev_ops->crypto_adapter_caps_get) 173 (dev, cdev, caps) : -ENOTSUP; 174 } 175 176 int __rte_experimental 177 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, 178 uint32_t *caps) 179 { 180 struct rte_eventdev *dev; 181 struct rte_eth_dev *eth_dev; 182 183 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 184 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL); 185 186 dev = &rte_eventdevs[dev_id]; 187 eth_dev = &rte_eth_devices[eth_port_id]; 188 189 if (caps == NULL) 190 return -EINVAL; 191 192 *caps = 0; 193 194 return dev->dev_ops->eth_tx_adapter_caps_get ? 195 (*dev->dev_ops->eth_tx_adapter_caps_get)(dev, 196 eth_dev, 197 caps) 198 : 0; 199 } 200 201 static inline int 202 rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues) 203 { 204 uint8_t old_nb_queues = dev->data->nb_queues; 205 struct rte_event_queue_conf *queues_cfg; 206 unsigned int i; 207 208 RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues, 209 dev->data->dev_id); 210 211 /* First time configuration */ 212 if (dev->data->queues_cfg == NULL && nb_queues != 0) { 213 /* Allocate memory to store queue configuration */ 214 dev->data->queues_cfg = rte_zmalloc_socket( 215 "eventdev->data->queues_cfg", 216 sizeof(dev->data->queues_cfg[0]) * nb_queues, 217 RTE_CACHE_LINE_SIZE, dev->data->socket_id); 218 if (dev->data->queues_cfg == NULL) { 219 dev->data->nb_queues = 0; 220 RTE_EDEV_LOG_ERR("failed to get mem for queue cfg," 221 "nb_queues %u", nb_queues); 222 return -(ENOMEM); 223 } 224 /* Re-configure */ 225 } else if (dev->data->queues_cfg != NULL && nb_queues != 0) { 226 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP); 227 228 for (i = nb_queues; i < old_nb_queues; i++) 229 (*dev->dev_ops->queue_release)(dev, i); 230 231 /* Re allocate memory to store queue configuration */ 232 queues_cfg = dev->data->queues_cfg; 233 queues_cfg = rte_realloc(queues_cfg, 234 sizeof(queues_cfg[0]) * nb_queues, 235 RTE_CACHE_LINE_SIZE); 236 if (queues_cfg == NULL) { 237 RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory," 238 " nb_queues %u", nb_queues); 239 return -(ENOMEM); 240 } 241 dev->data->queues_cfg = queues_cfg; 242 243 if (nb_queues > old_nb_queues) { 244 uint8_t new_qs = nb_queues - old_nb_queues; 245 246 memset(queues_cfg + old_nb_queues, 0, 247 sizeof(queues_cfg[0]) * new_qs); 248 } 249 } else if (dev->data->queues_cfg != NULL && nb_queues == 0) { 250 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP); 251 252 for (i = nb_queues; i < old_nb_queues; i++) 253 (*dev->dev_ops->queue_release)(dev, i); 254 } 255 256 dev->data->nb_queues = nb_queues; 257 return 0; 258 } 259 260 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead) 261 262 static inline int 263 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) 264 { 265 uint8_t old_nb_ports = dev->data->nb_ports; 266 void **ports; 267 uint16_t *links_map; 268 struct rte_event_port_conf *ports_cfg; 269 unsigned int i; 270 271 RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports, 272 dev->data->dev_id); 273 274 /* First time configuration */ 275 if (dev->data->ports == NULL && nb_ports != 0) { 276 dev->data->ports = rte_zmalloc_socket("eventdev->data->ports", 277 sizeof(dev->data->ports[0]) * nb_ports, 278 RTE_CACHE_LINE_SIZE, dev->data->socket_id); 279 if (dev->data->ports == NULL) { 280 dev->data->nb_ports = 0; 281 RTE_EDEV_LOG_ERR("failed to get mem for port meta data," 282 "nb_ports %u", nb_ports); 283 return -(ENOMEM); 284 } 285 286 /* Allocate memory to store port configurations */ 287 dev->data->ports_cfg = 288 rte_zmalloc_socket("eventdev->ports_cfg", 289 sizeof(dev->data->ports_cfg[0]) * nb_ports, 290 RTE_CACHE_LINE_SIZE, dev->data->socket_id); 291 if (dev->data->ports_cfg == NULL) { 292 dev->data->nb_ports = 0; 293 RTE_EDEV_LOG_ERR("failed to get mem for port cfg," 294 "nb_ports %u", nb_ports); 295 return -(ENOMEM); 296 } 297 298 /* Allocate memory to store queue to port link connection */ 299 dev->data->links_map = 300 rte_zmalloc_socket("eventdev->links_map", 301 sizeof(dev->data->links_map[0]) * nb_ports * 302 RTE_EVENT_MAX_QUEUES_PER_DEV, 303 RTE_CACHE_LINE_SIZE, dev->data->socket_id); 304 if (dev->data->links_map == NULL) { 305 dev->data->nb_ports = 0; 306 RTE_EDEV_LOG_ERR("failed to get mem for port_map area," 307 "nb_ports %u", nb_ports); 308 return -(ENOMEM); 309 } 310 for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++) 311 dev->data->links_map[i] = 312 EVENT_QUEUE_SERVICE_PRIORITY_INVALID; 313 } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */ 314 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP); 315 316 ports = dev->data->ports; 317 ports_cfg = dev->data->ports_cfg; 318 links_map = dev->data->links_map; 319 320 for (i = nb_ports; i < old_nb_ports; i++) 321 (*dev->dev_ops->port_release)(ports[i]); 322 323 /* Realloc memory for ports */ 324 ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports, 325 RTE_CACHE_LINE_SIZE); 326 if (ports == NULL) { 327 RTE_EDEV_LOG_ERR("failed to realloc port meta data," 328 " nb_ports %u", nb_ports); 329 return -(ENOMEM); 330 } 331 332 /* Realloc memory for ports_cfg */ 333 ports_cfg = rte_realloc(ports_cfg, 334 sizeof(ports_cfg[0]) * nb_ports, 335 RTE_CACHE_LINE_SIZE); 336 if (ports_cfg == NULL) { 337 RTE_EDEV_LOG_ERR("failed to realloc port cfg mem," 338 " nb_ports %u", nb_ports); 339 return -(ENOMEM); 340 } 341 342 /* Realloc memory to store queue to port link connection */ 343 links_map = rte_realloc(links_map, 344 sizeof(dev->data->links_map[0]) * nb_ports * 345 RTE_EVENT_MAX_QUEUES_PER_DEV, 346 RTE_CACHE_LINE_SIZE); 347 if (links_map == NULL) { 348 dev->data->nb_ports = 0; 349 RTE_EDEV_LOG_ERR("failed to realloc mem for port_map," 350 "nb_ports %u", nb_ports); 351 return -(ENOMEM); 352 } 353 354 if (nb_ports > old_nb_ports) { 355 uint8_t new_ps = nb_ports - old_nb_ports; 356 unsigned int old_links_map_end = 357 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; 358 unsigned int links_map_end = 359 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; 360 361 memset(ports + old_nb_ports, 0, 362 sizeof(ports[0]) * new_ps); 363 memset(ports_cfg + old_nb_ports, 0, 364 sizeof(ports_cfg[0]) * new_ps); 365 for (i = old_links_map_end; i < links_map_end; i++) 366 links_map[i] = 367 EVENT_QUEUE_SERVICE_PRIORITY_INVALID; 368 } 369 370 dev->data->ports = ports; 371 dev->data->ports_cfg = ports_cfg; 372 dev->data->links_map = links_map; 373 } else if (dev->data->ports != NULL && nb_ports == 0) { 374 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP); 375 376 ports = dev->data->ports; 377 for (i = nb_ports; i < old_nb_ports; i++) 378 (*dev->dev_ops->port_release)(ports[i]); 379 } 380 381 dev->data->nb_ports = nb_ports; 382 return 0; 383 } 384 385 int 386 rte_event_dev_configure(uint8_t dev_id, 387 const struct rte_event_dev_config *dev_conf) 388 { 389 struct rte_eventdev *dev; 390 struct rte_event_dev_info info; 391 int diag; 392 393 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 394 dev = &rte_eventdevs[dev_id]; 395 396 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 397 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); 398 399 if (dev->data->dev_started) { 400 RTE_EDEV_LOG_ERR( 401 "device %d must be stopped to allow configuration", dev_id); 402 return -EBUSY; 403 } 404 405 if (dev_conf == NULL) 406 return -EINVAL; 407 408 (*dev->dev_ops->dev_infos_get)(dev, &info); 409 410 /* Check dequeue_timeout_ns value is in limit */ 411 if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) { 412 if (dev_conf->dequeue_timeout_ns && 413 (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns 414 || dev_conf->dequeue_timeout_ns > 415 info.max_dequeue_timeout_ns)) { 416 RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d" 417 " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d", 418 dev_id, dev_conf->dequeue_timeout_ns, 419 info.min_dequeue_timeout_ns, 420 info.max_dequeue_timeout_ns); 421 return -EINVAL; 422 } 423 } 424 425 /* Check nb_events_limit is in limit */ 426 if (dev_conf->nb_events_limit > info.max_num_events) { 427 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d", 428 dev_id, dev_conf->nb_events_limit, info.max_num_events); 429 return -EINVAL; 430 } 431 432 /* Check nb_event_queues is in limit */ 433 if (!dev_conf->nb_event_queues) { 434 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero", 435 dev_id); 436 return -EINVAL; 437 } 438 if (dev_conf->nb_event_queues > info.max_event_queues) { 439 RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d", 440 dev_id, dev_conf->nb_event_queues, info.max_event_queues); 441 return -EINVAL; 442 } 443 444 /* Check nb_event_ports is in limit */ 445 if (!dev_conf->nb_event_ports) { 446 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id); 447 return -EINVAL; 448 } 449 if (dev_conf->nb_event_ports > info.max_event_ports) { 450 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d", 451 dev_id, dev_conf->nb_event_ports, info.max_event_ports); 452 return -EINVAL; 453 } 454 455 /* Check nb_event_queue_flows is in limit */ 456 if (!dev_conf->nb_event_queue_flows) { 457 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id); 458 return -EINVAL; 459 } 460 if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) { 461 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x", 462 dev_id, dev_conf->nb_event_queue_flows, 463 info.max_event_queue_flows); 464 return -EINVAL; 465 } 466 467 /* Check nb_event_port_dequeue_depth is in limit */ 468 if (!dev_conf->nb_event_port_dequeue_depth) { 469 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero", 470 dev_id); 471 return -EINVAL; 472 } 473 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) && 474 (dev_conf->nb_event_port_dequeue_depth > 475 info.max_event_port_dequeue_depth)) { 476 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d", 477 dev_id, dev_conf->nb_event_port_dequeue_depth, 478 info.max_event_port_dequeue_depth); 479 return -EINVAL; 480 } 481 482 /* Check nb_event_port_enqueue_depth is in limit */ 483 if (!dev_conf->nb_event_port_enqueue_depth) { 484 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero", 485 dev_id); 486 return -EINVAL; 487 } 488 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) && 489 (dev_conf->nb_event_port_enqueue_depth > 490 info.max_event_port_enqueue_depth)) { 491 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d", 492 dev_id, dev_conf->nb_event_port_enqueue_depth, 493 info.max_event_port_enqueue_depth); 494 return -EINVAL; 495 } 496 497 /* Copy the dev_conf parameter into the dev structure */ 498 memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf)); 499 500 /* Setup new number of queues and reconfigure device. */ 501 diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues); 502 if (diag != 0) { 503 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d", 504 dev_id, diag); 505 return diag; 506 } 507 508 /* Setup new number of ports and reconfigure device. */ 509 diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports); 510 if (diag != 0) { 511 rte_event_dev_queue_config(dev, 0); 512 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d", 513 dev_id, diag); 514 return diag; 515 } 516 517 /* Configure the device */ 518 diag = (*dev->dev_ops->dev_configure)(dev); 519 if (diag != 0) { 520 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag); 521 rte_event_dev_queue_config(dev, 0); 522 rte_event_dev_port_config(dev, 0); 523 } 524 525 dev->data->event_dev_cap = info.event_dev_cap; 526 return diag; 527 } 528 529 static inline int 530 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id) 531 { 532 if (queue_id < dev->data->nb_queues && queue_id < 533 RTE_EVENT_MAX_QUEUES_PER_DEV) 534 return 1; 535 else 536 return 0; 537 } 538 539 int 540 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id, 541 struct rte_event_queue_conf *queue_conf) 542 { 543 struct rte_eventdev *dev; 544 545 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 546 dev = &rte_eventdevs[dev_id]; 547 548 if (queue_conf == NULL) 549 return -EINVAL; 550 551 if (!is_valid_queue(dev, queue_id)) { 552 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id); 553 return -EINVAL; 554 } 555 556 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP); 557 memset(queue_conf, 0, sizeof(struct rte_event_queue_conf)); 558 (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf); 559 return 0; 560 } 561 562 static inline int 563 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf) 564 { 565 if (queue_conf && 566 !(queue_conf->event_queue_cfg & 567 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && 568 ((queue_conf->event_queue_cfg & 569 RTE_EVENT_QUEUE_CFG_ALL_TYPES) || 570 (queue_conf->schedule_type 571 == RTE_SCHED_TYPE_ATOMIC) 572 )) 573 return 1; 574 else 575 return 0; 576 } 577 578 static inline int 579 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf) 580 { 581 if (queue_conf && 582 !(queue_conf->event_queue_cfg & 583 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && 584 ((queue_conf->event_queue_cfg & 585 RTE_EVENT_QUEUE_CFG_ALL_TYPES) || 586 (queue_conf->schedule_type 587 == RTE_SCHED_TYPE_ORDERED) 588 )) 589 return 1; 590 else 591 return 0; 592 } 593 594 595 int 596 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id, 597 const struct rte_event_queue_conf *queue_conf) 598 { 599 struct rte_eventdev *dev; 600 struct rte_event_queue_conf def_conf; 601 602 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 603 dev = &rte_eventdevs[dev_id]; 604 605 if (!is_valid_queue(dev, queue_id)) { 606 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id); 607 return -EINVAL; 608 } 609 610 /* Check nb_atomic_flows limit */ 611 if (is_valid_atomic_queue_conf(queue_conf)) { 612 if (queue_conf->nb_atomic_flows == 0 || 613 queue_conf->nb_atomic_flows > 614 dev->data->dev_conf.nb_event_queue_flows) { 615 RTE_EDEV_LOG_ERR( 616 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d", 617 dev_id, queue_id, queue_conf->nb_atomic_flows, 618 dev->data->dev_conf.nb_event_queue_flows); 619 return -EINVAL; 620 } 621 } 622 623 /* Check nb_atomic_order_sequences limit */ 624 if (is_valid_ordered_queue_conf(queue_conf)) { 625 if (queue_conf->nb_atomic_order_sequences == 0 || 626 queue_conf->nb_atomic_order_sequences > 627 dev->data->dev_conf.nb_event_queue_flows) { 628 RTE_EDEV_LOG_ERR( 629 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d", 630 dev_id, queue_id, queue_conf->nb_atomic_order_sequences, 631 dev->data->dev_conf.nb_event_queue_flows); 632 return -EINVAL; 633 } 634 } 635 636 if (dev->data->dev_started) { 637 RTE_EDEV_LOG_ERR( 638 "device %d must be stopped to allow queue setup", dev_id); 639 return -EBUSY; 640 } 641 642 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP); 643 644 if (queue_conf == NULL) { 645 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, 646 -ENOTSUP); 647 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf); 648 queue_conf = &def_conf; 649 } 650 651 dev->data->queues_cfg[queue_id] = *queue_conf; 652 return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf); 653 } 654 655 static inline int 656 is_valid_port(struct rte_eventdev *dev, uint8_t port_id) 657 { 658 if (port_id < dev->data->nb_ports) 659 return 1; 660 else 661 return 0; 662 } 663 664 int 665 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id, 666 struct rte_event_port_conf *port_conf) 667 { 668 struct rte_eventdev *dev; 669 670 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 671 dev = &rte_eventdevs[dev_id]; 672 673 if (port_conf == NULL) 674 return -EINVAL; 675 676 if (!is_valid_port(dev, port_id)) { 677 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); 678 return -EINVAL; 679 } 680 681 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP); 682 memset(port_conf, 0, sizeof(struct rte_event_port_conf)); 683 (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf); 684 return 0; 685 } 686 687 int 688 rte_event_port_setup(uint8_t dev_id, uint8_t port_id, 689 const struct rte_event_port_conf *port_conf) 690 { 691 struct rte_eventdev *dev; 692 struct rte_event_port_conf def_conf; 693 int diag; 694 695 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 696 dev = &rte_eventdevs[dev_id]; 697 698 if (!is_valid_port(dev, port_id)) { 699 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); 700 return -EINVAL; 701 } 702 703 /* Check new_event_threshold limit */ 704 if ((port_conf && !port_conf->new_event_threshold) || 705 (port_conf && port_conf->new_event_threshold > 706 dev->data->dev_conf.nb_events_limit)) { 707 RTE_EDEV_LOG_ERR( 708 "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d", 709 dev_id, port_id, port_conf->new_event_threshold, 710 dev->data->dev_conf.nb_events_limit); 711 return -EINVAL; 712 } 713 714 /* Check dequeue_depth limit */ 715 if ((port_conf && !port_conf->dequeue_depth) || 716 (port_conf && port_conf->dequeue_depth > 717 dev->data->dev_conf.nb_event_port_dequeue_depth)) { 718 RTE_EDEV_LOG_ERR( 719 "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d", 720 dev_id, port_id, port_conf->dequeue_depth, 721 dev->data->dev_conf.nb_event_port_dequeue_depth); 722 return -EINVAL; 723 } 724 725 /* Check enqueue_depth limit */ 726 if ((port_conf && !port_conf->enqueue_depth) || 727 (port_conf && port_conf->enqueue_depth > 728 dev->data->dev_conf.nb_event_port_enqueue_depth)) { 729 RTE_EDEV_LOG_ERR( 730 "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d", 731 dev_id, port_id, port_conf->enqueue_depth, 732 dev->data->dev_conf.nb_event_port_enqueue_depth); 733 return -EINVAL; 734 } 735 736 if (port_conf && port_conf->disable_implicit_release && 737 !(dev->data->event_dev_cap & 738 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) { 739 RTE_EDEV_LOG_ERR( 740 "dev%d port%d Implicit release disable not supported", 741 dev_id, port_id); 742 return -EINVAL; 743 } 744 745 if (dev->data->dev_started) { 746 RTE_EDEV_LOG_ERR( 747 "device %d must be stopped to allow port setup", dev_id); 748 return -EBUSY; 749 } 750 751 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP); 752 753 if (port_conf == NULL) { 754 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, 755 -ENOTSUP); 756 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf); 757 port_conf = &def_conf; 758 } 759 760 dev->data->ports_cfg[port_id] = *port_conf; 761 762 diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf); 763 764 /* Unlink all the queues from this port(default state after setup) */ 765 if (!diag) 766 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0); 767 768 if (diag < 0) 769 return diag; 770 771 return 0; 772 } 773 774 int 775 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id, 776 uint32_t *attr_value) 777 { 778 struct rte_eventdev *dev; 779 780 if (!attr_value) 781 return -EINVAL; 782 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 783 dev = &rte_eventdevs[dev_id]; 784 785 switch (attr_id) { 786 case RTE_EVENT_DEV_ATTR_PORT_COUNT: 787 *attr_value = dev->data->nb_ports; 788 break; 789 case RTE_EVENT_DEV_ATTR_QUEUE_COUNT: 790 *attr_value = dev->data->nb_queues; 791 break; 792 case RTE_EVENT_DEV_ATTR_STARTED: 793 *attr_value = dev->data->dev_started; 794 break; 795 default: 796 return -EINVAL; 797 } 798 799 return 0; 800 } 801 802 int 803 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id, 804 uint32_t *attr_value) 805 { 806 struct rte_eventdev *dev; 807 808 if (!attr_value) 809 return -EINVAL; 810 811 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 812 dev = &rte_eventdevs[dev_id]; 813 if (!is_valid_port(dev, port_id)) { 814 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); 815 return -EINVAL; 816 } 817 818 switch (attr_id) { 819 case RTE_EVENT_PORT_ATTR_ENQ_DEPTH: 820 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth; 821 break; 822 case RTE_EVENT_PORT_ATTR_DEQ_DEPTH: 823 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth; 824 break; 825 case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD: 826 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold; 827 break; 828 default: 829 return -EINVAL; 830 }; 831 return 0; 832 } 833 834 int 835 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id, 836 uint32_t *attr_value) 837 { 838 struct rte_event_queue_conf *conf; 839 struct rte_eventdev *dev; 840 841 if (!attr_value) 842 return -EINVAL; 843 844 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 845 dev = &rte_eventdevs[dev_id]; 846 if (!is_valid_queue(dev, queue_id)) { 847 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id); 848 return -EINVAL; 849 } 850 851 conf = &dev->data->queues_cfg[queue_id]; 852 853 switch (attr_id) { 854 case RTE_EVENT_QUEUE_ATTR_PRIORITY: 855 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL; 856 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS) 857 *attr_value = conf->priority; 858 break; 859 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS: 860 *attr_value = conf->nb_atomic_flows; 861 break; 862 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES: 863 *attr_value = conf->nb_atomic_order_sequences; 864 break; 865 case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG: 866 *attr_value = conf->event_queue_cfg; 867 break; 868 case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE: 869 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) 870 return -EOVERFLOW; 871 872 *attr_value = conf->schedule_type; 873 break; 874 default: 875 return -EINVAL; 876 }; 877 return 0; 878 } 879 880 int 881 rte_event_port_link(uint8_t dev_id, uint8_t port_id, 882 const uint8_t queues[], const uint8_t priorities[], 883 uint16_t nb_links) 884 { 885 struct rte_eventdev *dev; 886 uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV]; 887 uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV]; 888 uint16_t *links_map; 889 int i, diag; 890 891 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0); 892 dev = &rte_eventdevs[dev_id]; 893 894 if (*dev->dev_ops->port_link == NULL) { 895 RTE_PMD_DEBUG_TRACE("Function not supported\n"); 896 rte_errno = ENOTSUP; 897 return 0; 898 } 899 900 if (!is_valid_port(dev, port_id)) { 901 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); 902 rte_errno = EINVAL; 903 return 0; 904 } 905 906 if (queues == NULL) { 907 for (i = 0; i < dev->data->nb_queues; i++) 908 queues_list[i] = i; 909 910 queues = queues_list; 911 nb_links = dev->data->nb_queues; 912 } 913 914 if (priorities == NULL) { 915 for (i = 0; i < nb_links; i++) 916 priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL; 917 918 priorities = priorities_list; 919 } 920 921 for (i = 0; i < nb_links; i++) 922 if (queues[i] >= dev->data->nb_queues) { 923 rte_errno = EINVAL; 924 return 0; 925 } 926 927 diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id], 928 queues, priorities, nb_links); 929 if (diag < 0) 930 return diag; 931 932 links_map = dev->data->links_map; 933 /* Point links_map to this port specific area */ 934 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); 935 for (i = 0; i < diag; i++) 936 links_map[queues[i]] = (uint8_t)priorities[i]; 937 938 return diag; 939 } 940 941 int 942 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, 943 uint8_t queues[], uint16_t nb_unlinks) 944 { 945 struct rte_eventdev *dev; 946 uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; 947 int i, diag, j; 948 uint16_t *links_map; 949 950 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0); 951 dev = &rte_eventdevs[dev_id]; 952 953 if (*dev->dev_ops->port_unlink == NULL) { 954 RTE_PMD_DEBUG_TRACE("Function not supported\n"); 955 rte_errno = ENOTSUP; 956 return 0; 957 } 958 959 if (!is_valid_port(dev, port_id)) { 960 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); 961 rte_errno = EINVAL; 962 return 0; 963 } 964 965 links_map = dev->data->links_map; 966 /* Point links_map to this port specific area */ 967 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); 968 969 if (queues == NULL) { 970 j = 0; 971 for (i = 0; i < dev->data->nb_queues; i++) { 972 if (links_map[i] != 973 EVENT_QUEUE_SERVICE_PRIORITY_INVALID) { 974 all_queues[j] = i; 975 j++; 976 } 977 } 978 queues = all_queues; 979 } else { 980 for (j = 0; j < nb_unlinks; j++) { 981 if (links_map[queues[j]] == 982 EVENT_QUEUE_SERVICE_PRIORITY_INVALID) 983 break; 984 } 985 } 986 987 nb_unlinks = j; 988 for (i = 0; i < nb_unlinks; i++) 989 if (queues[i] >= dev->data->nb_queues) { 990 rte_errno = EINVAL; 991 return 0; 992 } 993 994 diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id], 995 queues, nb_unlinks); 996 997 if (diag < 0) 998 return diag; 999 1000 for (i = 0; i < diag; i++) 1001 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID; 1002 1003 return diag; 1004 } 1005 1006 int __rte_experimental 1007 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id) 1008 { 1009 struct rte_eventdev *dev; 1010 1011 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1012 dev = &rte_eventdevs[dev_id]; 1013 if (!is_valid_port(dev, port_id)) { 1014 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); 1015 return -EINVAL; 1016 } 1017 1018 /* Return 0 if the PMD does not implement unlinks in progress. 1019 * This allows PMDs which handle unlink synchronously to not implement 1020 * this function at all. 1021 */ 1022 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0); 1023 1024 return (*dev->dev_ops->port_unlinks_in_progress)(dev, 1025 dev->data->ports[port_id]); 1026 } 1027 1028 int 1029 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, 1030 uint8_t queues[], uint8_t priorities[]) 1031 { 1032 struct rte_eventdev *dev; 1033 uint16_t *links_map; 1034 int i, count = 0; 1035 1036 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1037 dev = &rte_eventdevs[dev_id]; 1038 if (!is_valid_port(dev, port_id)) { 1039 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); 1040 return -EINVAL; 1041 } 1042 1043 links_map = dev->data->links_map; 1044 /* Point links_map to this port specific area */ 1045 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); 1046 for (i = 0; i < dev->data->nb_queues; i++) { 1047 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) { 1048 queues[count] = i; 1049 priorities[count] = (uint8_t)links_map[i]; 1050 ++count; 1051 } 1052 } 1053 return count; 1054 } 1055 1056 int 1057 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns, 1058 uint64_t *timeout_ticks) 1059 { 1060 struct rte_eventdev *dev; 1061 1062 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1063 dev = &rte_eventdevs[dev_id]; 1064 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP); 1065 1066 if (timeout_ticks == NULL) 1067 return -EINVAL; 1068 1069 return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks); 1070 } 1071 1072 int 1073 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id) 1074 { 1075 struct rte_eventdev *dev; 1076 1077 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1078 dev = &rte_eventdevs[dev_id]; 1079 1080 if (service_id == NULL) 1081 return -EINVAL; 1082 1083 if (dev->data->service_inited) 1084 *service_id = dev->data->service_id; 1085 1086 return dev->data->service_inited ? 0 : -ESRCH; 1087 } 1088 1089 int 1090 rte_event_dev_dump(uint8_t dev_id, FILE *f) 1091 { 1092 struct rte_eventdev *dev; 1093 1094 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1095 dev = &rte_eventdevs[dev_id]; 1096 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP); 1097 1098 (*dev->dev_ops->dump)(dev, f); 1099 return 0; 1100 1101 } 1102 1103 static int 1104 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode, 1105 uint8_t queue_port_id) 1106 { 1107 struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1108 if (dev->dev_ops->xstats_get_names != NULL) 1109 return (*dev->dev_ops->xstats_get_names)(dev, mode, 1110 queue_port_id, 1111 NULL, NULL, 0); 1112 return 0; 1113 } 1114 1115 int 1116 rte_event_dev_xstats_names_get(uint8_t dev_id, 1117 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id, 1118 struct rte_event_dev_xstats_name *xstats_names, 1119 unsigned int *ids, unsigned int size) 1120 { 1121 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV); 1122 const int cnt_expected_entries = xstats_get_count(dev_id, mode, 1123 queue_port_id); 1124 if (xstats_names == NULL || cnt_expected_entries < 0 || 1125 (int)size < cnt_expected_entries) 1126 return cnt_expected_entries; 1127 1128 /* dev_id checked above */ 1129 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1130 1131 if (dev->dev_ops->xstats_get_names != NULL) 1132 return (*dev->dev_ops->xstats_get_names)(dev, mode, 1133 queue_port_id, xstats_names, ids, size); 1134 1135 return -ENOTSUP; 1136 } 1137 1138 /* retrieve eventdev extended statistics */ 1139 int 1140 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode, 1141 uint8_t queue_port_id, const unsigned int ids[], 1142 uint64_t values[], unsigned int n) 1143 { 1144 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV); 1145 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1146 1147 /* implemented by the driver */ 1148 if (dev->dev_ops->xstats_get != NULL) 1149 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id, 1150 ids, values, n); 1151 return -ENOTSUP; 1152 } 1153 1154 uint64_t 1155 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name, 1156 unsigned int *id) 1157 { 1158 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0); 1159 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1160 unsigned int temp = -1; 1161 1162 if (id != NULL) 1163 *id = (unsigned int)-1; 1164 else 1165 id = &temp; /* ensure driver never gets a NULL value */ 1166 1167 /* implemented by driver */ 1168 if (dev->dev_ops->xstats_get_by_name != NULL) 1169 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id); 1170 return -ENOTSUP; 1171 } 1172 1173 int rte_event_dev_xstats_reset(uint8_t dev_id, 1174 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id, 1175 const uint32_t ids[], uint32_t nb_ids) 1176 { 1177 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1178 struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1179 1180 if (dev->dev_ops->xstats_reset != NULL) 1181 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id, 1182 ids, nb_ids); 1183 return -ENOTSUP; 1184 } 1185 1186 int rte_event_dev_selftest(uint8_t dev_id) 1187 { 1188 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1189 struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1190 1191 if (dev->dev_ops->dev_selftest != NULL) 1192 return (*dev->dev_ops->dev_selftest)(); 1193 return -ENOTSUP; 1194 } 1195 1196 int 1197 rte_event_dev_start(uint8_t dev_id) 1198 { 1199 struct rte_eventdev *dev; 1200 int diag; 1201 1202 RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id); 1203 1204 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1205 dev = &rte_eventdevs[dev_id]; 1206 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); 1207 1208 if (dev->data->dev_started != 0) { 1209 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already started", 1210 dev_id); 1211 return 0; 1212 } 1213 1214 diag = (*dev->dev_ops->dev_start)(dev); 1215 if (diag == 0) 1216 dev->data->dev_started = 1; 1217 else 1218 return diag; 1219 1220 return 0; 1221 } 1222 1223 int 1224 rte_event_dev_stop_flush_callback_register(uint8_t dev_id, 1225 eventdev_stop_flush_t callback, void *userdata) 1226 { 1227 struct rte_eventdev *dev; 1228 1229 RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id); 1230 1231 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1232 dev = &rte_eventdevs[dev_id]; 1233 1234 dev->dev_ops->dev_stop_flush = callback; 1235 dev->data->dev_stop_flush_arg = userdata; 1236 1237 return 0; 1238 } 1239 1240 void 1241 rte_event_dev_stop(uint8_t dev_id) 1242 { 1243 struct rte_eventdev *dev; 1244 1245 RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id); 1246 1247 RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id); 1248 dev = &rte_eventdevs[dev_id]; 1249 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop); 1250 1251 if (dev->data->dev_started == 0) { 1252 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already stopped", 1253 dev_id); 1254 return; 1255 } 1256 1257 dev->data->dev_started = 0; 1258 (*dev->dev_ops->dev_stop)(dev); 1259 } 1260 1261 int 1262 rte_event_dev_close(uint8_t dev_id) 1263 { 1264 struct rte_eventdev *dev; 1265 1266 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 1267 dev = &rte_eventdevs[dev_id]; 1268 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); 1269 1270 /* Device must be stopped before it can be closed */ 1271 if (dev->data->dev_started == 1) { 1272 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing", 1273 dev_id); 1274 return -EBUSY; 1275 } 1276 1277 return (*dev->dev_ops->dev_close)(dev); 1278 } 1279 1280 static inline int 1281 rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data, 1282 int socket_id) 1283 { 1284 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN]; 1285 const struct rte_memzone *mz; 1286 int n; 1287 1288 /* Generate memzone name */ 1289 n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id); 1290 if (n >= (int)sizeof(mz_name)) 1291 return -EINVAL; 1292 1293 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 1294 mz = rte_memzone_reserve(mz_name, 1295 sizeof(struct rte_eventdev_data), 1296 socket_id, 0); 1297 } else 1298 mz = rte_memzone_lookup(mz_name); 1299 1300 if (mz == NULL) 1301 return -ENOMEM; 1302 1303 *data = mz->addr; 1304 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 1305 memset(*data, 0, sizeof(struct rte_eventdev_data)); 1306 1307 return 0; 1308 } 1309 1310 static inline uint8_t 1311 rte_eventdev_find_free_device_index(void) 1312 { 1313 uint8_t dev_id; 1314 1315 for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) { 1316 if (rte_eventdevs[dev_id].attached == 1317 RTE_EVENTDEV_DETACHED) 1318 return dev_id; 1319 } 1320 return RTE_EVENT_MAX_DEVS; 1321 } 1322 1323 static uint16_t 1324 rte_event_tx_adapter_enqueue(__rte_unused void *port, 1325 __rte_unused struct rte_event ev[], 1326 __rte_unused uint16_t nb_events) 1327 { 1328 rte_errno = ENOTSUP; 1329 return 0; 1330 } 1331 1332 struct rte_eventdev * 1333 rte_event_pmd_allocate(const char *name, int socket_id) 1334 { 1335 struct rte_eventdev *eventdev; 1336 uint8_t dev_id; 1337 1338 if (rte_event_pmd_get_named_dev(name) != NULL) { 1339 RTE_EDEV_LOG_ERR("Event device with name %s already " 1340 "allocated!", name); 1341 return NULL; 1342 } 1343 1344 dev_id = rte_eventdev_find_free_device_index(); 1345 if (dev_id == RTE_EVENT_MAX_DEVS) { 1346 RTE_EDEV_LOG_ERR("Reached maximum number of event devices"); 1347 return NULL; 1348 } 1349 1350 eventdev = &rte_eventdevs[dev_id]; 1351 1352 eventdev->txa_enqueue = rte_event_tx_adapter_enqueue; 1353 1354 if (eventdev->data == NULL) { 1355 struct rte_eventdev_data *eventdev_data = NULL; 1356 1357 int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data, 1358 socket_id); 1359 1360 if (retval < 0 || eventdev_data == NULL) 1361 return NULL; 1362 1363 eventdev->data = eventdev_data; 1364 1365 snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN, 1366 "%s", name); 1367 1368 eventdev->data->dev_id = dev_id; 1369 eventdev->data->socket_id = socket_id; 1370 eventdev->data->dev_started = 0; 1371 1372 eventdev->attached = RTE_EVENTDEV_ATTACHED; 1373 1374 eventdev_globals.nb_devs++; 1375 } 1376 1377 return eventdev; 1378 } 1379 1380 int 1381 rte_event_pmd_release(struct rte_eventdev *eventdev) 1382 { 1383 int ret; 1384 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN]; 1385 const struct rte_memzone *mz; 1386 1387 if (eventdev == NULL) 1388 return -EINVAL; 1389 1390 eventdev->attached = RTE_EVENTDEV_DETACHED; 1391 eventdev_globals.nb_devs--; 1392 1393 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 1394 rte_free(eventdev->data->dev_private); 1395 1396 /* Generate memzone name */ 1397 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", 1398 eventdev->data->dev_id); 1399 if (ret >= (int)sizeof(mz_name)) 1400 return -EINVAL; 1401 1402 mz = rte_memzone_lookup(mz_name); 1403 if (mz == NULL) 1404 return -ENOMEM; 1405 1406 ret = rte_memzone_free(mz); 1407 if (ret) 1408 return ret; 1409 } 1410 1411 eventdev->data = NULL; 1412 return 0; 1413 } 1414