/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"

struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs = 0
};

struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return rte_eventdev_globals->nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
		if ((strcmp(rte_event_devices[i].data->name, name)
				== 0) &&
				(rte_event_devices[i].attached ==
						RTE_EVENTDEV_ATTACHED))
			return i;
	return -ENODEV;
}

int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
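	/* The macro above returns -EINVAL to the caller when dev_id is out
	 * of range or the device is not attached, so the dereference below
	 * is safe.
	 */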
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	return 0;
}

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
			(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
					&rte_eth_devices[eth_port_id],
					caps)
			: 0;
}

static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
		/* Allocate memory to store queue configuration */
		dev->data->queues_cfg = rte_zmalloc_socket(
				"eventdev->data->queues_cfg",
				sizeof(dev->data->queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_cfg == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg, "
					"nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
	/* Re-configure */
	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Re allocate memory to store queue configuration */
		queues_cfg = dev->data->queues_cfg;
		queues_cfg = rte_realloc(queues_cfg,
				sizeof(queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory, "
					"nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
		dev->data->queues_cfg = queues_cfg;

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}

#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			dev->data->dev_id);

	/* First time configuration */
	if (dev->data->ports == NULL && nb_ports != 0) {
		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
				sizeof(dev->data->ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE,
				dev->data->socket_id);
		if (dev->data->ports == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port meta data, "
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store port configurations */
		dev->data->ports_cfg =
			rte_zmalloc_socket("eventdev->ports_cfg",
			sizeof(dev->data->ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_cfg == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port cfg, "
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store queue to port link connection */
		dev->data->links_map =
			rte_zmalloc_socket("eventdev->links_map",
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port_map area, "
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}
		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
			dev->data->links_map[i] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	} else if (dev->data->ports != NULL && nb_ports != 0) { /* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		/* Realloc memory for ports */
		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE);
		if (ports == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port meta data, "
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory for ports_cfg */
		ports_cfg = rte_realloc(ports_cfg,
			sizeof(ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem, "
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory to store queue to port link connection */
		links_map = rte_realloc(links_map,
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE);
		if (links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map, "
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}

		dev->data->ports = ports;
		dev->data->ports_cfg = ports_cfg;
		dev->data->links_map = links_map;
	} else if (dev->data->ports != NULL && nb_ports == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}

int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_dequeue_depth >
			info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			info.max_event_port_enqueue_depth)) {
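		/* The per-port enqueue depth is only bounded for devices
		 * that advertise RTE_EVENT_DEV_CAP_BURST_MODE.
		 */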
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	return diag;
}

static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ATOMIC)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ORDERED)
		))
		return 1;
	else
		return 0;
}


int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
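			/* nb_atomic_flows must be non-zero and must not
			 * exceed the per-queue flow budget set at configure
			 * time.
			 */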
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}

static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
				dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
				dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port(default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	if (diag < 0)
		return diag;

	return 0;
}

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_link == NULL) {
		RTE_PMD_DEBUG_TRACE("Function not supported\n");
		rte_errno = -ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = -EINVAL;
		return 0;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = -EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	return diag;
}

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, diag;
	uint16_t *links_map;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_unlink == NULL) {
		RTE_PMD_DEBUG_TRACE("Function not supported\n");
		rte_errno = -ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = -EINVAL;
		return 0;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			all_queues[i] = i;
		queues = all_queues;
		nb_unlinks = dev->data->nb_queues;
	}

	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = -EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	return diag;
}

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	return dev->data->service_inited ? 0 : -ESRCH;
}

int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}

static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
				ids, values, n);
	return -ENOTSUP;
}

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
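	/* The driver callback is handed an id pointer that is never NULL;
	 * a caller-supplied id is primed to (unsigned int)-1 ("not found")
	 * until the driver fills it in.
	 */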
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}

int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}

int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
				dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
				dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	return (*dev->dev_ops->dev_close)(dev);
}

static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_eventdev_data));

	return 0;
}

static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
				socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
				"%s", name);

		eventdev->data->dev_id = dev_id;
		eventdev->data->socket_id = socket_id;
		eventdev->data->dev_started = 0;

		eventdev->attached = RTE_EVENTDEV_ATTACHED;

		eventdev_globals.nb_devs++;
	}

	return eventdev;
}

int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}
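
/*
 * Illustrative sketch only (not part of the library): the typical north
 * bound call sequence an application would issue against the APIs
 * implemented above, assuming a device with one queue and one port and
 * using the limits reported by rte_event_dev_info_get(). Error handling
 * is omitted for brevity.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &cfg);
 *
 *	rte_event_queue_setup(dev_id, 0, NULL);	// NULL: driver default conf
 *	rte_event_port_setup(dev_id, 0, NULL);	// NULL: driver default conf
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0); // link all queues
 *	rte_event_dev_start(dev_id);
 */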