/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

/**
 * @file
 *
 * RTE Event Device API
 *
 * In a polling model, lcores poll ethdev ports and associated Rx queues
 * directly to look for packets. In an event-driven model, by contrast, lcores
 * call a scheduler that selects packets for them based on programmer-specified
 * criteria. The eventdev library adds support for the event-driven programming
 * model, which offers applications automatic multicore scaling, dynamic load
 * balancing, pipelining, packet ingress order maintenance and
 * synchronization services to simplify application packet processing.
 *
 * The Event Device API is composed of two parts:
 *
 * - The application-oriented Event API that includes functions to set up
 *   an event device (configure it, set up its queues and ports, and start it),
 *   to establish links between queues and ports, to receive events, and so on.
 *
 * - The driver-oriented Event API that exports a function allowing
 *   an event Poll Mode Driver (PMD) to register itself as
 *   an event device driver.
 *
 * Event device components:
 *
 *                +-----------------+
 *                | +-------------+ |
 *   +-------+    | |   flow 0    | |
 *   |Packet |    | +-------------+ |
 *   |event  |    | +-------------+ |
 *   |       |    | |   flow 1    | |port_link(port0, queue0)
 *   +-------+    | +-------------+ |     |     +--------+
 *   +-------+    | +-------------+ o-----v-----o        |dequeue +------+
 *   |Crypto |    | |   flow n    | |           | event  +------->|Core 0|
 *   |work   |    | +-------------+ o----+      | port 0 |        |      |
 *   |done ev|    |  event queue 0  |    |      +--------+        +------+
 *   +-------+    +-----------------+    |
 *   +-------+                           |
 *   |Timer  |    +-----------------+    |      +--------+
 *   |expiry |    | +-------------+ |    +------o        |dequeue +------+
 *   |event  |    | |   flow 0    | o-----------o event  +------->|Core 1|
 *   +-------+    | +-------------+ |      +----o port 1 |        |      |
 *  Event enqueue | +-------------+ |      |    +--------+        +------+
 * o-------------> | |   flow 1    | |      |
 *    enqueue(    | +-------------+ |      |
 *    queue_id,   |                 |      |    +--------+        +------+
 *    flow_id,    | +-------------+ |      |    |        |dequeue |Core 2|
 *    sched_type, | |   flow n    | o-----------o event  +------->|      |
 *    event_type, | +-------------+ |      |    | port 2 |        +------+
 *    subev_type, |  event queue 1  |      |    +--------+
 *    event)      +-----------------+      |    +--------+
 *                                         |    |        |dequeue +------+
 *   +-------+    +-----------------+      |    | event  +------->|Core n|
 *   |Core   |    | +-------------+ o-----------o port n |        |      |
 *   |(SW)   |    | |   flow 0    | |      |    +--------+        +--+---+
 *   |event  |    | +-------------+ |      |                         |
 *   +-------+    | +-------------+ |      |                         |
 *       ^        | |   flow 1    | |      |                         |
 *       |        | +-------------+ o------+                         |
 *       |        | +-------------+ |                                |
 *       |        | |   flow n    | |                                |
 *       |        | +-------------+ |                                |
 *       |        |  event queue n  |                                |
 *       |        +-----------------+                                |
 *       |                                                           |
 *       +-----------------------------------------------------------+
 *
 * Event device: A hardware or software-based event scheduler.
 *
 * Event: A unit of scheduling that encapsulates a packet or other datatype,
 * such as an SW-generated event from the CPU, a crypto work completion
 * notification, a timer expiry notification, etc., as well as metadata.
 * The metadata includes the flow ID, scheduling type, event priority,
 * event_type, sub_event_type, etc.
 *
 * Event queue: A queue containing events that are scheduled by the event dev.
 * An event queue contains events of different flows associated with scheduling
 * types, such as atomic, ordered, or parallel.
 *
 * Event port: An application's interface into the event dev for enqueue and
 * dequeue operations. Each event port can be linked with one or more
 * event queues for dequeue operations.
 *
 * By default, all the functions of the Event Device API exported by a PMD
 * are lock-free functions, which assume they are not invoked in parallel on
 * different logical cores to work on the same target object. For instance,
 * the dequeue function of a PMD cannot be invoked in parallel on two logical
 * cores to operate on the same event port. Of course, this function
 * can be invoked in parallel by different logical cores on different ports.
 * It is the responsibility of the upper-level application to enforce this
 * rule.
 *
 * In all functions of the Event API, the event device is
 * designated by an integer >= 0 named the device identifier *dev_id*.
 *
 * At the Event driver level, event devices are represented by a generic
 * data structure of type *rte_event_dev*.
 *
 * Event devices are dynamically registered during the PCI/SoC device probing
 * phase performed at EAL initialization time.
 * When an event device is being probed, an *rte_event_dev* structure and
 * a new device identifier are allocated for that device. Then, the
 * event_dev_init() function supplied by the Event driver matching the probed
 * device is invoked to properly initialize the device.
 *
 * The role of the device init function is to reset the hardware or
 * software event driver implementation.
 *
 * If the device init operation is successful, the correspondence between
 * the device identifier assigned to the new device and its associated
 * *rte_event_dev* structure is effectively registered.
 * Otherwise, both the *rte_event_dev* structure and the device identifier are
 * freed.
 *
 * The functions exported by the application Event API to set up a device
 * designated by its device identifier must be invoked in the following order:
 *     - rte_event_dev_configure()
 *     - rte_event_queue_setup()
 *     - rte_event_port_setup()
 *     - rte_event_port_link()
 *     - rte_event_dev_start()
 *
 * Then, the application can invoke, in any order, the functions
 * exported by the Event API to schedule events, dequeue events, enqueue
 * events, establish or change event queue to event port links, and so on.
 *
 * The application may use rte_event_[queue/port]_default_conf_get() to get
 * the default configuration for an event queue or event port and set it up
 * by overriding a few default values.
 *
 * If the application wants to change the configuration (i.e. call
 * rte_event_dev_configure(), rte_event_queue_setup(), or
 * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
 * device and then do the reconfiguration before calling rte_event_dev_start()
 * again. The schedule, enqueue and dequeue functions should not be invoked
 * when the device is stopped.
 *
 * Finally, an application can close an event device by invoking the
 * rte_event_dev_close() function.
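 *
 * As a minimal sketch (error handling omitted; the single queue/port and the
 * use of default queue/port configurations are illustrative, not required),
 * the setup sequence above could look like:
 * \code{.c}
 *	uint8_t dev_id = 0, queue_id = 0, port_id = 0;
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config config = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	config.nb_event_queues = 1;
 *	config.nb_event_ports = 1;
 *	config.nb_events_limit = info.max_num_events;
 *	config.nb_event_queue_flows = info.max_event_queue_flows;
 *	config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	rte_event_dev_configure(dev_id, &config);
 *
 *	rte_event_queue_setup(dev_id, queue_id, NULL);
 *	rte_event_port_setup(dev_id, port_id, NULL);
 *	rte_event_port_link(dev_id, port_id, &queue_id, NULL, 1);
 *	rte_event_dev_start(dev_id);
 * \endcode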
 *
 * Each function of the application Event API invokes a specific function
 * of the PMD that controls the target device designated by its device
 * identifier.
 *
 * For this purpose, all device-specific functions of an Event driver are
 * supplied through a set of pointers contained in a generic structure of type
 * *event_dev_ops*.
 * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
 * structure by the device init function of the Event driver, which is
 * invoked during the PCI/SoC device probing phase, as explained earlier.
 *
 * In other words, each function of the Event API simply retrieves the
 * *rte_event_dev* structure associated with the device identifier and
 * performs an indirect invocation of the corresponding driver function
 * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
 *
 * For performance reasons, the addresses of the fast-path functions of the
 * Event driver are not contained in the *event_dev_ops* structure.
 * Instead, they are directly stored at the beginning of the *rte_event_dev*
 * structure to avoid an extra indirect memory access during their invocation.
 *
 * RTE event device drivers do not use interrupts for enqueue or dequeue
 * operations. Instead, Event drivers export poll-mode enqueue and dequeue
 * functions to applications.
 *
 * Events are injected into the event device through the *enqueue* operation
 * by event producers in the system. Typical event producers are the ethdev
 * subsystem (generating packet events), the CPU (SW, generating events based
 * on different stages of application processing) and cryptodev (generating
 * crypto work completion notifications), etc.
 *
 * The *dequeue* operation gets one or more events from the event ports.
 * The application processes the events and, if it is an intermediate stage of
 * event processing, sends them to a downstream event queue through
 * rte_event_enqueue_burst(). At the final stage, the application may instead
 * hand them to a different subsystem, e.g. transmit the packet/event on the
 * wire using the ethdev rte_eth_tx_burst() API.
 *
 * The point at which events are scheduled to ports depends on the device.
 * For hardware devices, scheduling occurs asynchronously without any software
 * intervention. Software schedulers can either be distributed
 * (each worker thread schedules events to its own port) or centralized
 * (a dedicated thread schedules to all ports). Distributed software schedulers
 * perform the scheduling inside rte_event_dequeue_burst(), whereas centralized
 * scheduler logic needs a dedicated service core for scheduling.
 * If the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set, the
 * device is centralized and thus needs a dedicated scheduling thread that
 * repeatedly calls the software-specific scheduling function.
 *
 * An event-driven worker thread has the following typical workflow on the
 * fast path:
 * \code{.c}
 *	while (1) {
 *		rte_event_dequeue_burst(...);
 *		(event processing)
 *		rte_event_enqueue_burst(...);
 *	}
 * \endcode
 *
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_common.h>
#include <rte_config.h>
#include <rte_memory.h>
#include <rte_errno.h>

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS           (1ULL << 0)
/**< Event scheduling prioritization is based on the priority associated with
 * each event queue.
 *
 * @see rte_event_queue_setup()
 */
#define RTE_EVENT_DEV_CAP_EVENT_QOS           (1ULL << 1)
/**< Event scheduling prioritization is based on the priority associated with
 * each event. The priority of each event is supplied in the *rte_event*
 * structure on each enqueue operation.
 *
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED   (1ULL << 2)
/**< Event device operates in distributed scheduling mode.
 * In distributed scheduling mode, event scheduling happens in HW or
 * rte_event_dequeue_burst() or a combination of the two.
 * If the flag is not set, the eventdev is centralized and thus needs a
 * dedicated service core that acts as a scheduling thread.
 *
 * @see rte_event_dequeue_burst()
 */
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
/**< Event device is capable of enqueuing events of any type to any queue.
 * If this capability is not set, the queue only supports events of the
 * *RTE_SCHED_TYPE_* type that it was created with.
 *
 * @see RTE_SCHED_TYPE_* values
 */
#define RTE_EVENT_DEV_CAP_BURST_MODE          (1ULL << 4)
/**< Event device is capable of operating in burst mode for enqueue (forward,
 * release) and dequeue operations. If this capability is not set, the
 * application still uses rte_event_dequeue_burst() and
 * rte_event_enqueue_burst(), but the PMD accepts only one event at a time.
 *
 * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE    (1ULL << 5)
/**< Event device ports support disabling the implicit release feature, in
 * which the port will release all unreleased events in its dequeue operation.
 * If this capability is set and the port is configured with implicit release
 * disabled, the application is responsible for explicitly releasing events
 * using either the RTE_EVENT_OP_FORWARD or the RTE_EVENT_OP_RELEASE event
 * enqueue operations.
 *
 * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
 */

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE         (1ULL << 6)
/**< Event device is capable of operating in non-sequential mode. The path
 * of an event need not be sequential, and the application can change
 * the path of an event at runtime. If the flag is not set, each event
 * must follow a path from queue 0 to queue 1 to queue 2, etc. If the flag is
 * set, events may be sent to queues in any order. If the flag is not set, the
 * eventdev will return an error when the application enqueues an event for a
 * qid which is not the next in the sequence.
 */

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK   (1ULL << 7)
/**< Event device is capable of reconfiguring the queue/port link at runtime.
 * If the flag is not set, the eventdev queue/port link can only be
 * configured during initialization.
 */

#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
/**< Event device is capable of setting up links between multiple queues and
 * a single port. If the flag is not set, the eventdev can only map a
 * single queue to each port, or map a single queue to many ports.
 */

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
/**< Highest priority expressed across the eventdev subsystem
 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
#define RTE_EVENT_DEV_PRIORITY_NORMAL    128
/**< Normal priority expressed across the eventdev subsystem
 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
#define RTE_EVENT_DEV_PRIORITY_LOWEST    255
/**< Lowest priority expressed across the eventdev subsystem
 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */

/**
 * Get the total number of event devices that have been successfully
 * initialised.
 *
 * @return
 *   The total number of usable event devices.
 */
uint8_t
rte_event_dev_count(void);

/**
 * Get the device identifier for the named event device.
 *
 * @param name
 *   Event device name to select the event device identifier.
 *
 * @return
 *   Returns the event device identifier on success.
 *   - <0: Failure to find the named event device.
 */
int
rte_event_dev_get_dev_id(const char *name);

/**
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   - -EINVAL: *dev_id* value is out of range.
 */
int
rte_event_dev_socket_id(uint8_t dev_id);

/**
 * Event device information
 */
struct rte_event_dev_info {
	const char *driver_name;	/**< Event driver name */
	struct rte_device *dev;	/**< Device information */
	uint32_t min_dequeue_timeout_ns;
	/**< Minimum global dequeue timeout(ns) supported by this device */
	uint32_t max_dequeue_timeout_ns;
	/**< Maximum global dequeue timeout(ns) supported by this device */
	uint32_t dequeue_timeout_ns;
	/**< Configured global dequeue timeout(ns) for this device */
	uint8_t max_event_queues;
	/**< Maximum event queues supported by this device */
	uint32_t max_event_queue_flows;
	/**< Maximum number of flows supported in an event queue by this
	 * device
	 */
	uint8_t max_event_queue_priority_levels;
	/**< Maximum number of event queue priority levels supported by this
	 * device.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability
	 */
	uint8_t max_event_priority_levels;
	/**< Maximum number of event priority levels supported by this device.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_EVENT_QOS capability
	 */
	uint8_t max_event_ports;
	/**< Maximum number of event ports supported by this device */
	uint8_t max_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time from an
	 * event port on this device.
	 * A device that does not support bulk dequeue will set this to 1.
	 */
	uint32_t max_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time to an
	 * event port on this device.
	 * A device that does not support bulk enqueue will set this to 1.
	 */
	int32_t max_num_events;
	/**< A *closed system* event dev has a limit on the number of events it
	 * can manage at a time. An *open system* event dev does not have a
	 * limit and will specify this as -1.
	 */
	uint32_t event_dev_cap;
	/**< Event device capabilities (RTE_EVENT_DEV_CAP_*) */
};

/**
 * Retrieve the contextual information of an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param[out] dev_info
 *   A pointer to a structure of type *rte_event_dev_info* to be filled with
 *   the contextual information of the device.
 *
 * @return
 *   - 0: Success, driver updates the contextual information of the event
 *	device
 *   - <0: Error code returned by the driver info get function.
 *
 */
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

/**
 * The count of ports.
 */
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
/**
 * The count of queues.
 */
#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
/**
 * The status of the device, zero for stopped, non-zero for started.
 */
#define RTE_EVENT_DEV_ATTR_STARTED 2

/**
 * Get an attribute from a device.
 *
 * @param dev_id Eventdev id
 * @param attr_id The attribute ID to retrieve
 * @param[out] attr_value A pointer that will be filled in with the attribute
 *             value if successful.
 *
 * @return
 *   - 0: Successfully retrieved attribute value
 *   - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL
 */
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value);
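
/*
 * Example: query the number of configured ports on a device (a sketch only;
 * device 0 and the absence of error handling are illustrative):
 *
 *	uint32_t nb_ports = 0;
 *
 *	rte_event_dev_attr_get(0, RTE_EVENT_DEV_ATTR_PORT_COUNT, &nb_ports);
 */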

/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global *dequeue_timeout_ns* and use a per-dequeue timeout
 * in ns.
 * @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
 */

/** Event device configuration structure */
struct rte_event_dev_config {
	uint32_t dequeue_timeout_ns;
	/**< rte_event_dequeue_burst() timeout on this device.
	 * This value should be in the range of *min_dequeue_timeout_ns* and
	 * *max_dequeue_timeout_ns* which were previously provided in
	 * rte_event_dev_info_get().
	 * The value 0 is allowed, in which case the default dequeue timeout
	 * is used.
	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
	 */
	int32_t nb_events_limit;
	/**< In a *closed system* this field is the limit on the maximum
	 * number of events that can be inflight in the eventdev at a given
	 * time. The limit is required to ensure that the finite space in a
	 * closed system is not overwhelmed. The value cannot exceed the
	 * *max_num_events* provided by rte_event_dev_info_get().
	 * This value should be set to -1 for an *open system*.
	 */
	uint8_t nb_event_queues;
	/**< Number of event queues to configure on this device.
	 * This value cannot exceed the *max_event_queues* previously provided
	 * in rte_event_dev_info_get().
	 */
	uint8_t nb_event_ports;
	/**< Number of event ports to configure on this device.
	 * This value cannot exceed the *max_event_ports* previously provided
	 * in rte_event_dev_info_get().
	 */
	uint32_t nb_event_queue_flows;
	/**< Number of flows for any event queue on this device.
	 * This value cannot exceed the *max_event_queue_flows* previously
	 * provided in rte_event_dev_info_get().
	 */
	uint32_t nb_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time from an
	 * event port on this device.
	 * This value cannot exceed the *max_event_port_dequeue_depth*
	 * previously provided in rte_event_dev_info_get().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 * @see rte_event_port_setup()
	 */
	uint32_t nb_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time to an
	 * event port on this device.
	 * This value cannot exceed the *max_event_port_enqueue_depth*
	 * previously provided in rte_event_dev_info_get().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 * @see rte_event_port_setup()
	 */
	uint32_t event_dev_cfg;
	/**< Event device config flags (RTE_EVENT_DEV_CFG_*) */
};

/**
 * Configure an event device.
 *
 * This function must be invoked before any other function in the
 * API. It can also be re-invoked when a device is in the
 * stopped state.
 *
 * The caller may use rte_event_dev_info_get() to get the capabilities and
 * resource limits of this event device.
 *
 * @param dev_id
 *   The identifier of the device to configure.
 * @param dev_conf
 *   The event device configuration structure.
 *
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf);


/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES          (1ULL << 0)
/**< Allow ATOMIC, ORDERED and PARALLEL schedule type enqueue
 *
 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK        (1ULL << 1)
/**< This event queue links only to a single event port.
 *
 * @see rte_event_port_setup(), rte_event_port_link()
 */

/** Event queue configuration structure */
struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	/**< The maximum number of active flows this queue can track at any
	 * given time. If the queue is configured for atomic scheduling (by
	 * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
	 * or the RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the
	 * value must be in the range of [1, nb_event_queue_flows], which was
	 * previously provided in rte_event_dev_configure().
	 */
	uint32_t nb_atomic_order_sequences;
	/**< The maximum number of outstanding events waiting to be
	 * reordered by this queue. In other words, the number of entries in
	 * this queue's reorder buffer. When the number of events in the
	 * reorder buffer reaches *nb_atomic_order_sequences*, the
	 * scheduler cannot schedule the events from this queue and an invalid
	 * event will be returned from dequeue until one or more entries are
	 * freed up/released.
	 * If the queue is configured for ordered scheduling (by applying the
	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or the
	 * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must
	 * be in the range of [1, nb_event_queue_flows], which was
	 * previously supplied to rte_event_dev_configure().
	 */
	uint32_t event_queue_cfg;
	/**< Queue cfg flags (EVENT_QUEUE_CFG_*) */
	uint8_t schedule_type;
	/**< Queue schedule type (RTE_SCHED_TYPE_*).
	 * Valid when the RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
	 * event_queue_cfg.
	 */
	uint8_t priority;
	/**< Priority for this event queue relative to other event queues.
	 * The requested priority should be in the range of
	 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
	 * The implementation shall normalize the requested priority to an
	 * event device supported priority value.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability
	 */
};

/**
 * Retrieve the default configuration information of an event queue designated
 * by its *queue_id* from the event driver for an event device.
 *
 * This function is intended to be used in conjunction with
 * rte_event_queue_setup() where the caller needs to set up the queue by
 * overriding a few default values.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to get the configuration information.
 *   The value must be in the range [0, nb_event_queues - 1]
 *   previously supplied to rte_event_dev_configure().
 * @param[out] queue_conf
 *   The pointer to the default event queue configuration data.
 * @return
 *   - 0: Success, driver updates the default event queue configuration data.
 *   - <0: Error code returned by the driver info get function.
 *
 * @see rte_event_queue_setup()
 *
 */
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf);

/**
 * Allocate and set up an event queue for an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to setup. The value must be in the range
 *   [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure().
 * @param queue_conf
 *   The pointer to the configuration data to be used for the event queue.
 *   A NULL value is allowed, in which case the default configuration is used.
 *
 * @see rte_event_queue_default_conf_get()
 *
 * @return
 *   - 0: Success, event queue correctly set up.
 *   - <0: event queue configuration failed
 */
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf);
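
/*
 * Example: set up an atomic queue starting from the driver defaults
 * (a sketch; dev_id/queue_id and the chosen values are illustrative):
 *
 *	struct rte_event_queue_conf conf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
 *	conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	conf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	rte_event_queue_setup(dev_id, queue_id, &conf);
 */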

/**
 * The priority of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
/**
 * The number of atomic flows configured for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
/**
 * The number of atomic order sequences configured for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
/**
 * The cfg flags for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
/**
 * The schedule type of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4

/**
 * Get an attribute from a queue.
 *
 * @param dev_id
 *   Eventdev id
 * @param queue_id
 *   Eventdev queue id
 * @param attr_id
 *   The attribute ID to retrieve
 * @param[out] attr_value
 *   A pointer that will be filled in with the attribute value if successful
 *
 * @return
 *   - 0: Successfully returned value
 *   - -EINVAL: invalid device, queue or attr_id provided, or attr_value was
 *		NULL
 *   - -EOVERFLOW: returned when attr_id is set to
 *   RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
 *   RTE_EVENT_QUEUE_CFG_ALL_TYPES
 */
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint32_t *attr_value);

/* Event port specific APIs */

/** Event port configuration structure */
struct rte_event_port_conf {
	int32_t new_event_threshold;
	/**< A backpressure threshold for new event enqueues on this port.
	 * Use for a *closed system* event dev where event capacity is limited,
	 * and cannot exceed the capacity of the event dev.
	 * Configuring ports with different thresholds can make higher priority
	 * traffic less likely to be backpressured.
	 * For example, a port used to inject NIC Rx packets into the event dev
	 * can have a lower threshold so as not to overwhelm the device,
	 * while ports used for worker pools can have a higher threshold.
	 * This value cannot exceed the *nb_events_limit*
	 * which was previously supplied to rte_event_dev_configure().
	 * This should be set to '-1' for an *open system*.
	 */
	uint16_t dequeue_depth;
	/**< Configure the number of bulk dequeues for this event port.
	 * This value cannot exceed the *nb_event_port_dequeue_depth*
	 * which was previously supplied to rte_event_dev_configure().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
	uint16_t enqueue_depth;
	/**< Configure the number of bulk enqueues for this event port.
	 * This value cannot exceed the *nb_event_port_enqueue_depth*
	 * which was previously supplied to rte_event_dev_configure().
	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
	uint8_t disable_implicit_release;
	/**< Configure the port not to release outstanding events in
	 * rte_event_dequeue_burst(). If true, all events received through
	 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
	 * RTE_EVENT_OP_FORWARD. Must be false when the device is not
	 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
	 */
};

/**
 * Retrieve the default configuration information of an event port designated
 * by its *port_id* from the event driver for an event device.
 *
 * This function is intended to be used in conjunction with
 * rte_event_port_setup() where the caller needs to set up the port by
 * overriding a few default values.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to get the configuration information.
 *   The value must be in the range [0, nb_event_ports - 1]
 *   previously supplied to rte_event_dev_configure().
 * @param[out] port_conf
 *   The pointer to the default event port configuration data.
 * @return
 *   - 0: Success, driver updates the default event port configuration data.
 *   - <0: Error code returned by the driver info get function.
 *
 * @see rte_event_port_setup()
 *
 */
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf);

/**
 * Allocate and set up an event port for an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The index of the event port to setup. The value must be in the range
 *   [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
 * @param port_conf
 *   The pointer to the configuration data to be used for the port.
 *   A NULL value is allowed, in which case the default configuration is used.
 *
 * @see rte_event_port_default_conf_get()
 *
 * @return
 *   - 0: Success, event port correctly set up.
 *   - <0: Port configuration failed.
 *   - (-EDQUOT) Quota exceeded (the application tried to link a queue
 *     configured with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one
 *     event port).
 */
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf);
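
/*
 * Example: set up a producer port with a lowered new-event threshold so it
 * is backpressured before the device fills (a sketch; the threshold value
 * is illustrative):
 *
 *	struct rte_event_port_conf conf;
 *
 *	rte_event_port_default_conf_get(dev_id, port_id, &conf);
 *	conf.new_event_threshold = 1024;
 *	rte_event_port_setup(dev_id, port_id, &conf);
 */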

/**
 * The queue depth of the port on the enqueue side.
 */
#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
/**
 * The queue depth of the port on the dequeue side.
 */
#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
/**
 * The new event threshold of the port.
 */
#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2

/**
 * Get an attribute from a port.
 *
 * @param dev_id
 *   Eventdev id
 * @param port_id
 *   Eventdev port id
 * @param attr_id
 *   The attribute ID to retrieve
 * @param[out] attr_value
 *   A pointer that will be filled in with the attribute value if successful
 *
 * @return
 *   - 0: Successfully returned value
 *   - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL
 */
int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value);

/**
 * Start an event device.
 *
 * The device start step is the last one and consists of setting the event
 * queues to start accepting events and scheduling them to event ports.
 *
 * On success, all basic functions exported by the API (event enqueue,
 * event dequeue and so on) can be invoked.
 *
 * @param dev_id
 *   Event device identifier
 * @return
 *   - 0: Success, device started.
 *   - -ESTALE : Not all ports of the device are configured
 *   - -ENOLINK: Not all queues are linked, which could lead to deadlock.
 */
int
rte_event_dev_start(uint8_t dev_id);

/**
 * Stop an event device.
 *
 * This function causes all queued events to be drained, including those
 * residing in event ports. While draining events out of the device, this
 * function calls the user-provided flush callback (if one was registered) once
 * per event.
 *
 * The device can be restarted with a call to rte_event_dev_start(). Threads
 * that continue to enqueue/dequeue while the device is stopped, or being
 * stopped, will result in undefined behavior. This includes event adapters,
 * which must be stopped prior to stopping the eventdev.
 *
 * @param dev_id
 *   Event device identifier.
 *
 * @see rte_event_dev_stop_flush_callback_register()
 */
void
rte_event_dev_stop(uint8_t dev_id);

typedef void (*eventdev_stop_flush_t)(uint8_t dev_id, struct rte_event event,
				      void *arg);
/**< Callback function called during rte_event_dev_stop(), invoked once per
 * flushed event.
 */

/**
 * Registers a callback function to be invoked during rte_event_dev_stop() for
 * each flushed event. This function can be used to properly dispose of queued
 * events, for example events containing memory pointers.
 *
 * The callback function is only registered for the calling process. The
 * callback function must be registered in every process that can call
 * rte_event_dev_stop().
 *
 * To unregister a callback, call this function with a NULL callback pointer.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param callback
 *   Callback function invoked once per flushed event.
 * @param userdata
 *   Argument supplied to callback.
 *
 * @return
 *   - 0 on success.
 *   - -EINVAL if *dev_id* is invalid
 *
 * @see rte_event_dev_stop()
 */
int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
					   eventdev_stop_flush_t callback,
					   void *userdata);
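
/*
 * Example: a flush callback that frees the mbuf carried by each flushed
 * event (a sketch; it assumes rte_mbuf.h is included and that every queued
 * event carries a valid mbuf):
 *
 *	static void
 *	flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(arg);
 *		rte_pktmbuf_free(event.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);
 *	rte_event_dev_stop(dev_id);
 */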

/**
 * Close an event device. The device cannot be restarted!
 *
 * @param dev_id
 *   Event device identifier
 *
 * @return
 *  - 0 on successfully closing device
 *  - <0 on failure to close device
 *  - (-EAGAIN) if device is busy
 */
int
rte_event_dev_close(uint8_t dev_id);

/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED          0
/**< Ordered scheduling
 *
 * Events from an ordered flow of an event queue can be scheduled to multiple
 * ports for concurrent processing while maintaining the original event order.
 * This scheme enables the user to achieve high single-flow throughput by
 * avoiding SW synchronization for ordering between ports which are bound to
 * cores.
 *
 * The source flow ordering from an event queue is maintained when events are
 * enqueued to their destination queue within the same ordered flow context.
 * An event port holds the context until the application calls
 * rte_event_dequeue_burst() from the same port, which implicitly releases
 * the context.
 * The user may allow the scheduler to release the context earlier than that
 * by invoking rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE
 * operation.
 *
 * Events from the source queue appear in their original order when dequeued
 * from a destination queue.
 * Event ordering is based on the received event(s), but also other
 * (newly allocated or stored) events are ordered when enqueued within the same
 * ordered context. Events not enqueued (e.g. released or stored) within the
 * context are considered missing from reordering and are skipped at this time
 * (but can be ordered again within another context).
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
 */

#define RTE_SCHED_TYPE_ATOMIC           1
/**< Atomic scheduling
 *
 * Events from an atomic flow of an event queue can be scheduled only to a
 * single port at a time. The port is guaranteed to have exclusive (atomic)
 * access to the associated flow context, which enables the user to avoid SW
 * synchronization. Atomic flows also help to maintain event ordering,
 * since only one port at a time can process events from a flow of an
 * event queue.
 *
 * The atomic queue synchronization context is dedicated to the port until
 * the application calls rte_event_dequeue_burst() from the same port,
 * which implicitly releases the context. The user may allow the scheduler to
 * release the context earlier than that by invoking rte_event_enqueue_burst()
 * with the RTE_EVENT_OP_RELEASE operation.
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
 */

#define RTE_SCHED_TYPE_PARALLEL         2
/**< Parallel scheduling
 *
 * The scheduler performs priority scheduling, load balancing, etc. functions
 * but does not provide additional event synchronization or ordering.
 * It is free to schedule events from a single parallel flow of an event queue
 * to multiple event ports for concurrent processing.
 * The application is responsible for flow context synchronization and
 * event ordering (SW synchronization).
 *
 * @see rte_event_queue_setup(), rte_event_dequeue_burst()
 */

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV           0x0
/**< The event generated from ethdev subsystem */
#define RTE_EVENT_TYPE_CRYPTODEV        0x1
/**< The event generated from cryptodev subsystem */
#define RTE_EVENT_TYPE_TIMER            0x2
/**< The event generated from event timer adapter */
#define RTE_EVENT_TYPE_CPU              0x3
/**< The event generated from cpu for pipelining.
 * Application may use *sub_event_type* to further classify the event
 */
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER   0x4
/**< The event generated from event eth Rx adapter */
#define RTE_EVENT_TYPE_MAX              0x10
/**< Maximum number of event types */

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW                0
/**< The event producers use this operation to inject a new event into the
 * event device.
 */
#define RTE_EVENT_OP_FORWARD            1
/**< The CPU uses this operation to forward the event to a different event
 * queue or change to a new application-specific flow or schedule type, to
 * enable pipelining.
 *
 * This operation must only be enqueued to the same port that the
 * event to be forwarded was dequeued from.
 */
#define RTE_EVENT_OP_RELEASE            2
/**< Release the flow context associated with the schedule type.
 *
 * If the current flow's schedule type is *RTE_SCHED_TYPE_ATOMIC*
 * then this operation hints the scheduler that the user has completed
 * critical-section processing in the current atomic context.
 * The scheduler is now allowed to schedule events from the same flow from
 * an event queue to another port. However, the context may still be held
 * until the next rte_event_dequeue_burst() call; this operation allows, but
 * does not force, the scheduler to release the context early.
 *
 * Early atomic context release may increase parallelism and thus system
 * performance, but the user needs to design carefully the split into critical
 * vs non-critical sections.
 *
 * If the current flow's schedule type is *RTE_SCHED_TYPE_ORDERED*
 * then this operation hints the scheduler that the user has done all that is
 * needed to maintain event order in the current ordered context.
 * The scheduler is allowed to release the ordered context of this port and
 * avoid reordering any following enqueues.
 *
 * Early ordered context release may increase parallelism and thus system
 * performance.
 *
 * If the current flow's schedule type is *RTE_SCHED_TYPE_PARALLEL*
 * or no scheduling context is held, then this operation may be a NOOP,
 * depending on the implementation.
 *
 * This operation must only be enqueued to the same port that the
 * event to be released was dequeued from.
 *
 */

/**
 * The generic *rte_event* structure to hold the event attributes
 * for dequeue and enqueue operation
 */
RTE_STD_C11
struct rte_event {
	/** WORD0 */
	union {
		uint64_t event;
		/** Event attributes for dequeue or enqueue operation */
		struct {
			uint32_t flow_id:20;
			/**< Targeted flow identifier for the enqueue and
			 * dequeue operation.
			 * The value must be in the range of
			 * [0, nb_event_queue_flows - 1] which was
			 * previously supplied to rte_event_dev_configure().
			 */
			uint32_t sub_event_type:8;
			/**< Sub-event types based on the event source.
			 * @see RTE_EVENT_TYPE_CPU
			 */
			uint32_t event_type:4;
			/**< Event type to classify the event source.
			 * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*)
			 */
			uint8_t op:2;
			/**< The type of event enqueue operation - new/forward/
			 * etc. This field is not preserved across an instance
			 * and is undefined on dequeue.
			 * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*)
			 */
			uint8_t rsvd:4;
			/**< Reserved for future use */
			uint8_t sched_type:2;
			/**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
			 * associated with the flow id on a given event queue
			 * for the enqueue and dequeue operation.
			 */
			uint8_t queue_id;
			/**< Targeted event queue identifier for the enqueue
			 * or dequeue operation.
			 * The value must be in the range of
			 * [0, nb_event_queues - 1] which was previously
			 * supplied to rte_event_dev_configure().
			 */
			uint8_t priority;
			/**< Event priority relative to other events in the
			 * event queue. The requested priority should be in
			 * the range of [RTE_EVENT_DEV_PRIORITY_HIGHEST,
			 * RTE_EVENT_DEV_PRIORITY_LOWEST].
			 * The implementation shall normalize the requested
			 * priority to a supported priority value.
			 * Valid when the device has the
			 * RTE_EVENT_DEV_CAP_EVENT_QOS capability.
			 */
			uint8_t impl_opaque;
			/**< Implementation-specific opaque value.
			 * An implementation may use this field to hold an
			 * implementation-specific value to share between
			 * dequeue and enqueue operation.
			 * The application should not modify this field.
			 */
		};
	};
	/** WORD1 */
	union {
		uint64_t u64;
		/**< Opaque 64-bit value */
		void *event_ptr;
		/**< Opaque event pointer */
		struct rte_mbuf *mbuf;
		/**< mbuf pointer if dequeued event is associated with mbuf */
	};
};
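
/*
 * Example: populate a new CPU-generated event carrying an mbuf before
 * enqueueing it (a sketch; the queue, flow and mbuf variables are
 * illustrative):
 *
 *	struct rte_event ev;
 *
 *	ev.event = 0;	// clear all WORD0 metadata first
 *	ev.op = RTE_EVENT_OP_NEW;
 *	ev.queue_id = queue_id;
 *	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ev.event_type = RTE_EVENT_TYPE_CPU;
 *	ev.flow_id = flow_id;
 *	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	ev.mbuf = m;
 *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 */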

/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT	0x1
/**< This flag is set when the packet transfer mechanism is in HW.
 * The ethdev can send packets to the event device using an internal
 * event port.
 */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ	0x2
/**< Adapter supports multiple event queues per ethdev. Every ethdev
 * Rx queue can be connected to a unique event queue.
 */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
/**< The application can override the adapter-generated flow ID in the
 * event. This flow ID can be specified when adding an ethdev Rx queue
 * to the adapter using the ev member of
 * struct rte_event_eth_rx_adapter_queue_conf.
 * @see struct rte_event_eth_rx_adapter_queue_conf::ev
 * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
 */

/**
 * Retrieve the event device's ethdev Rx adapter capabilities for the
 * specified ethernet port.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param eth_port_id
 *   The identifier of the ethernet device.
 *
 * @param[out] caps
 *   A pointer to memory filled with Rx event adapter capabilities.
 *
 * @return
 *   - 0: Success, driver provides Rx event adapter capabilities for the
 *	ethernet device.
 *   - <0: Error code returned by the driver function.
 *
 */
int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				  uint32_t *caps);
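
/*
 * Example: check whether the ethdev can deliver packets to the event device
 * on its own, i.e. without a service core (a sketch; both devices are
 * assumed to be id 0):
 *
 *	uint32_t caps = 0;
 *	int internal_port;
 *
 *	rte_event_eth_rx_adapter_caps_get(0, 0, &caps);
 *	internal_port = !!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
 */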

#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
/**< This flag is set when the timer mechanism is in HW. */

/**
 * Retrieve the event device's timer adapter capabilities.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param[out] caps
 *   A pointer to memory to be filled with event timer adapter capabilities.
 *
 * @return
 *   - 0: Success, driver provided event timer adapter capabilities.
 *   - <0: Error code returned by the driver function.
 */
int __rte_experimental
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

/* Crypto adapter capability bitmap flag */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW   0x1
/**< Flag indicates HW is capable of generating events in
 * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
 * packets to the event device as new events using an internal
 * event port.
 */

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD   0x2
/**< Flag indicates HW is capable of generating events in
 * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
 * packets to the event device as forwarded events using an
 * internal event port.
 */

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND  0x4
/**< Flag indicates HW is capable of mapping a crypto queue pair to an
 * event queue.
 */

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA   0x8
/**< Flag indicates HW/SW supports a mechanism to store and retrieve
 * the private data information along with the crypto session.
 */

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Retrieve the event device's crypto adapter capabilities for the
 * specified cryptodev device.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param cdev_id
 *   The identifier of the cryptodev device.
 *
 * @param[out] caps
 *   A pointer to memory filled with event adapter capabilities.
 *   It is expected to be pre-allocated & initialized by the caller.
 *
 * @return
 *   - 0: Success, driver provides event adapter capabilities for the
 *	cryptodev device.
 *   - <0: Error code returned by the driver function.
 *
 */
int __rte_experimental
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
				  uint32_t *caps);

/* Ethdev Tx adapter capability bitmap flags */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT	0x1
/**< This flag is set when the PMD supports a packet transmit callback
 */

/**
 * Retrieve the event device's eth Tx adapter capabilities.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param eth_port_id
 *   The identifier of the ethernet device.
 *
 * @param[out] caps
 *   A pointer to memory filled with eth Tx adapter capabilities.
 *
 * @return
 *   - 0: Success, driver provides eth Tx adapter capabilities.
 *   - <0: Error code returned by the driver function.
 *
 */
int __rte_experimental
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				  uint32_t *caps);

struct rte_eventdev_ops;
struct rte_eventdev;

typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
/**< @internal Enqueue event on port of a device */

typedef uint16_t (*event_enqueue_burst_t)(void *port,
					  const struct rte_event ev[],
					  uint16_t nb_events);
/**< @internal Enqueue burst of events on port of a device */

typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
				    uint64_t timeout_ticks);
/**< @internal Dequeue event from port of a device */

typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
					  uint16_t nb_events,
					  uint64_t timeout_ticks);
/**< @internal Dequeue burst of events from port of a device */

typedef uint16_t (*event_tx_adapter_enqueue)(void *port,
					     struct rte_event ev[],
					     uint16_t nb_events);
/**< @internal Enqueue burst of events on port of a device for the eth Tx
 * adapter
 */

#define RTE_EVENTDEV_NAME_MAX_LEN	(64)
/**< @internal Max length of name of event PMD */

/**
 * @internal
 * The data part, with no function pointers, associated with each device.
 *
 * This structure is safe to place in shared memory to be common among
 * different processes in a multi-process configuration.
 */
struct rte_eventdev_data {
	int socket_id;
	/**< Socket ID where memory is allocated */
	uint8_t dev_id;
	/**< Device ID for this instance */
	uint8_t nb_queues;
	/**< Number of event queues. */
	uint8_t nb_ports;
	/**< Number of event ports. */
	void **ports;
	/**< Array of pointers to ports. */
	struct rte_event_port_conf *ports_cfg;
	/**< Array of port configuration structures. */
	struct rte_event_queue_conf *queues_cfg;
	/**< Array of queue configuration structures. */
	uint16_t *links_map;
	/**< Memory to store queue to port connections. */
	void *dev_private;
	/**< PMD-specific private data */
	uint32_t event_dev_cap;
	/**< Event device capabilities (RTE_EVENT_DEV_CAP_*) */
	struct rte_event_dev_config dev_conf;
	/**< Configuration applied to device.
	 */
	uint8_t service_inited;
	/* Service initialization state */
	uint32_t service_id;
	/* Service ID */
	void *dev_stop_flush_arg;
	/**< User-provided argument for event flush function */

	RTE_STD_C11
	uint8_t dev_started : 1;
	/**< Device state: STARTED(1)/STOPPED(0) */

	char name[RTE_EVENTDEV_NAME_MAX_LEN];
	/**< Unique identifier name */
} __rte_cache_aligned;

/** @internal The data structure associated with each event device. */
struct rte_eventdev {
	event_enqueue_t enqueue;
	/**< Pointer to PMD enqueue function. */
	event_enqueue_burst_t enqueue_burst;
	/**< Pointer to PMD enqueue burst function. */
	event_enqueue_burst_t enqueue_new_burst;
	/**< Pointer to PMD enqueue burst function (op new variant) */
	event_enqueue_burst_t enqueue_forward_burst;
	/**< Pointer to PMD enqueue burst function (op forward variant) */
	event_dequeue_t dequeue;
	/**< Pointer to PMD dequeue function. */
	event_dequeue_burst_t dequeue_burst;
	/**< Pointer to PMD dequeue burst function. */
	event_tx_adapter_enqueue txa_enqueue;
	/**< Pointer to PMD eth Tx adapter enqueue function. */
	struct rte_eventdev_data *data;
	/**< Pointer to device data */
	struct rte_eventdev_ops *dev_ops;
	/**< Functions exported by PMD */
	struct rte_device *dev;
	/**< Device info supplied by probing */

	RTE_STD_C11
	uint8_t attached : 1;
	/**< Flag indicating the device is attached */
} __rte_cache_aligned;

extern struct rte_eventdev *rte_eventdevs;
/** @internal The pool of rte_eventdev structures. */

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			  const struct rte_event ev[], uint16_t nb_events,
			  const event_enqueue_burst_t fn)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
		rte_errno = -EINVAL;
		return 0;
	}

	if (port_id >= dev->data->nb_ports) {
		rte_errno = -EINVAL;
		return 0;
	}
#endif
	/*
	 * Allow zero-cost non-burst mode routine invocation if the
	 * application requests nb_events as const one.
	 */
	if (nb_events == 1)
		return (*dev->enqueue)(dev->data->ports[port_id], ev);
	else
		return fn(dev->data->ports[port_id], ev, nb_events);
}

/**
 * Enqueue a burst of event objects or a single event object supplied in the
 * *rte_event* structure on an event device designated by its *dev_id* through
 * the event port specified by *port_id*. Each event object specifies the
 * event queue on which it will be enqueued.
 *
 * The *nb_events* parameter is the number of event objects to enqueue, which
 * are supplied in the *ev* array of *rte_event* structures.
 *
 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
 * enqueued to the same port that their associated events were dequeued from.
 *
 * The rte_event_enqueue_burst() function returns the number of
 * event objects it actually enqueued. A return value equal to *nb_events*
 * means that all event objects have been enqueued.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   which contain the event object enqueue operations to be processed.
 * @param nb_events
 *   The number of event objects to enqueue, typically the number of
 *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
 *   available for this port.
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter
 *   when the event device's queue is full or if invalid parameters are
 *   specified in a *rte_event*. If the return value is less than *nb_events*,
 *   the remaining events at the end of ev[] are not consumed and the caller
 *   has to take care of them, and rte_errno is set accordingly. Possible
 *   errno values include:
 *   - -EINVAL   The port ID is invalid, device ID is invalid, an event's
 *               queue ID is invalid, or an event's sched type doesn't match
 *               the capabilities of the destination queue.
 *   - -ENOSPC   The event port was backpressured and unable to enqueue
 *               one or more events. This error code is only applicable to
 *               closed systems.
 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 */
static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
					 dev->enqueue_burst);
}
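
/*
 * Example: enqueue a burst and retry the tail that the device did not accept
 * (a sketch; it assumes the only cause of a short count is backpressure,
 * i.e. the events themselves are valid):
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_ev)
 *		sent += rte_event_enqueue_burst(dev_id, port_id,
 *						ev + sent, nb_ev - sent);
 */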

/**
 * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
 * an event device designated by its *dev_id* through the event port specified
 * by *port_id*.
 *
 * Provides the same functionality as rte_event_enqueue_burst(), except that
 * the application can use this API when all objects in the burst contain
 * enqueue operations of type *RTE_EVENT_OP_NEW*. This specialized function
 * provides an additional hint to the PMD, which can optimize if possible.
 *
 * The result of rte_event_enqueue_new_burst() is undefined if the enqueue
 * burst has an event object of operation type != RTE_EVENT_OP_NEW.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   which contain the event object enqueue operations to be processed.
 * @param nb_events
 *   The number of event objects to enqueue, typically the enqueue depth
 *   available for this port, as returned by
 *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...).
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified
 *   in a *rte_event*. If the return value is less than *nb_events*, the
 *   remaining events at the end of ev[] are not consumed and the caller has
 *   to take care of them, and rte_errno is set accordingly. Possible errno
 *   values include:
 *   - -EINVAL  The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - -ENOSPC  The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 * @see rte_event_enqueue_burst()
 */
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
			    const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			dev->enqueue_new_burst);
}

/**
 * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
 * on an event device designated by its *dev_id* through the event port
 * specified by *port_id*.
 *
 * Provides the same functionality as rte_event_enqueue_burst(), except that
 * the application can use this API when all objects in the burst contain
 * enqueue operations of type *RTE_EVENT_OP_FORWARD*. This specialized
 * function provides an additional hint to the PMD, which can optimize if
 * possible.
 *
 * The result of rte_event_enqueue_forward_burst() is undefined if the enqueue
 * burst has an event object of operation type != RTE_EVENT_OP_FORWARD.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   which contain the event object enqueue operations to be processed.
 * @param nb_events
 *   The number of event objects to enqueue, typically the enqueue depth
 *   available for this port, as returned by
 *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...).
 *
 * @return
 *   The number of event objects actually enqueued on the event device. The
 *   return value can be less than the value of the *nb_events* parameter when
 *   the event device's queue is full or if invalid parameters are specified
 *   in a *rte_event*. If the return value is less than *nb_events*, the
 *   remaining events at the end of ev[] are not consumed and the caller has
 *   to take care of them, and rte_errno is set accordingly. Possible errno
 *   values include:
 *   - -EINVAL  The port ID is invalid, device ID is invalid, an event's queue
 *              ID is invalid, or an event's sched type doesn't match the
 *              capabilities of the destination queue.
 *   - -ENOSPC  The event port was backpressured and unable to enqueue
 *              one or more events. This error code is only applicable to
 *              closed systems.
 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
 * @see rte_event_enqueue_burst()
 */
static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
				const struct rte_event ev[],
				uint16_t nb_events)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			dev->enqueue_forward_burst);
}
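
/*
 * Illustrative sketch, not part of the API: a producer stage injects new
 * events with rte_event_enqueue_new_burst(), while a worker stage re-injects
 * events it dequeued using rte_event_enqueue_forward_burst(). All names
 * (dev_id, the port ids, the queue ids, ev, i, n) are placeholders.
 *
 * @code
 *	// Producer: every event in the burst is RTE_EVENT_OP_NEW.
 *	for (i = 0; i < n; i++) {
 *		ev[i].op = RTE_EVENT_OP_NEW;
 *		ev[i].queue_id = first_stage_queue_id;
 *	}
 *	rte_event_enqueue_new_burst(dev_id, producer_port_id, ev, n);
 *
 *	// Worker: forward previously dequeued events to the next stage.
 *	n = rte_event_dequeue_burst(dev_id, worker_port_id, ev, n, 0);
 *	for (i = 0; i < n; i++) {
 *		ev[i].op = RTE_EVENT_OP_FORWARD;
 *		ev[i].queue_id = next_stage_queue_id;
 *	}
 *	rte_event_enqueue_forward_burst(dev_id, worker_port_id, ev, n);
 * @endcode
 */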

/**
 * Converts nanoseconds to the *timeout_ticks* value for
 * rte_event_dequeue_burst().
 *
 * If the device is configured with the RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
 * flag, the application can use this function to convert a timeout value in
 * nanoseconds to the implementation-specific timeout value supplied in
 * rte_event_dequeue_burst().
 *
 * @param dev_id
 *   The identifier of the device.
 * @param ns
 *   Wait time in nanoseconds.
 * @param[out] timeout_ticks
 *   Value for the *timeout_ticks* parameter in rte_event_dequeue_burst().
 *
 * @return
 *   - 0 on success.
 *   - -ENOTSUP if the device doesn't support timeouts.
 *   - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL.
 *   - other values < 0 on failure.
 *
 * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
 * @see rte_event_dev_configure()
 *
 */
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				uint64_t *timeout_ticks);
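
/*
 * Illustrative sketch, not part of the API (dev_id and the 100 us value are
 * placeholders): convert a nanosecond wait into the device-specific tick
 * value expected by rte_event_dequeue_burst(), falling back to no-wait
 * polling when the device does not support timeouts.
 *
 * @code
 *	uint64_t timeout_ticks = 0;
 *	int ret;
 *
 *	ret = rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000ULL,
 *					      &timeout_ticks);
 *	if (ret == -ENOTSUP)
 *		timeout_ticks = 0;	// no timeout support: dequeue no-wait
 *	else if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "cannot convert dequeue timeout\n");
 * @endcode
 */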

/**
 * Dequeue a burst of event objects or a single event object from the event
 * port designated by its *event_port_id*, on an event device designated
 * by its *dev_id*.
 *
 * rte_event_dequeue_burst() does not dictate the specifics of the scheduling
 * algorithm, as each eventdev driver may have different criteria to schedule
 * an event. However, in general, from an application's perspective the
 * scheduler may use the following scheme to dispatch an event to the port.
 *
 * 1) Selection of event queue based on
 *   a) The list of event queues linked to the event port.
 *   b) If the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability, event
 *      queue selection from the list is based on event queue priority
 *      relative to the other event queues, supplied as *priority* in
 *      rte_event_queue_setup().
 *   c) If the device has the RTE_EVENT_DEV_CAP_EVENT_QOS capability, event
 *      queue selection from the list is based on event priority supplied as
 *      *priority* in rte_event_enqueue_burst().
 * 2) Selection of event based on
 *   a) The number of flows available in the selected event queue.
 *   b) The schedule type associated with the event.
 *
 * The *nb_events* parameter is the maximum number of event objects to
 * dequeue, which are returned in the *ev* array of *rte_event* structures.
 *
 * The rte_event_dequeue_burst() function returns the number of event objects
 * it actually dequeued. A return value equal to *nb_events* means that all
 * event objects have been dequeued.
 *
 * The number of events dequeued is the number of scheduler contexts held by
 * this port. These contexts are automatically released in the next
 * rte_event_dequeue_burst() invocation if the port supports implicit
 * releases; alternatively, invoking rte_event_enqueue_burst() with the
 * RTE_EVENT_OP_RELEASE operation can be used to release the contexts early.
 *
 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
 * enqueued to the same port that their associated events were dequeued from.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param port_id
 *   The identifier of the event port.
 * @param[out] ev
 *   Points to an array of *nb_events* objects of type *rte_event* structure
 *   for output to be populated with the dequeued event objects.
 * @param nb_events
 *   The maximum number of event objects to dequeue, typically the value
 *   returned by rte_event_port_dequeue_depth() for this port.
 * @param timeout_ticks
 *   - 0 no-wait, returns immediately if there is no event.
 *   - >0 wait for the event. If the device is configured with
 *     RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, this function will wait until
 *     at least one event is available or *timeout_ticks* time has elapsed.
 *     If the device is not configured with
 *     RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, this function will wait until
 *     an event is available or for the *dequeue_timeout_ns* ns previously
 *     supplied to rte_event_dev_configure().
 *
 * @return
 *   The number of event objects actually dequeued from the port. The return
 *   value can be less than the value of the *nb_events* parameter when fewer
 *   than *nb_events* events are available on the event port.
 *
 * @see rte_event_port_dequeue_depth()
 */
static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
		rte_errno = -EINVAL;
		return 0;
	}

	if (port_id >= dev->data->nb_ports) {
		rte_errno = -EINVAL;
		return 0;
	}
#endif

	/*
	 * Allow zero-cost non-burst mode routine invocation if the
	 * application requests nb_events as a const one.
	 */
	if (nb_events == 1)
		return (*dev->dequeue)(
			dev->data->ports[port_id], ev, timeout_ticks);
	else
		return (*dev->dequeue_burst)(
			dev->data->ports[port_id], ev, nb_events,
			timeout_ticks);
}
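
/*
 * Illustrative worker-loop sketch, not part of the API: dequeue a burst with
 * a previously converted *timeout_ticks* value, process each event, and rely
 * on implicit releases at the next dequeue. BURST_SIZE, dev_id, port_id,
 * timeout_ticks, done and process_event() are placeholders.
 *
 * @code
 *	struct rte_event ev[BURST_SIZE];
 *	uint16_t i, nb_rx;
 *
 *	while (!done) {
 *		nb_rx = rte_event_dequeue_burst(dev_id, port_id, ev,
 *						BURST_SIZE, timeout_ticks);
 *		for (i = 0; i < nb_rx; i++)
 *			process_event(&ev[i]);	// application-specific work
 *		// scheduler contexts held by this port are released
 *		// implicitly on the next rte_event_dequeue_burst() call
 *	}
 * @endcode
 */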

/**
 * Link multiple source event queues supplied in *queues* to the destination
 * event port designated by its *port_id* with associated service priorities
 * supplied in *priorities* on the event device designated by its *dev_id*.
 *
 * The link establishment shall enable the event port *port_id* to receive
 * events from the specified event queue(s) supplied in *queues*.
 *
 * An event queue may link to one or more event ports.
 * The number of links that can be established from an event queue to an event
 * port is implementation-defined.
 *
 * Event queue(s) to event port links can be changed at runtime without
 * re-configuring the device, to support scaling and to reduce the latency of
 * critical work by establishing links with more event ports at runtime.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param port_id
 *   Event port identifier to select the destination port to link.
 *
 * @param queues
 *   Points to an array of *nb_links* event queues to be linked
 *   to the event port.
 *   A NULL value is allowed, in which case this function links all the
 *   configured event queues *nb_event_queues* previously supplied to
 *   rte_event_dev_configure() to the event port *port_id*.
 *
 * @param priorities
 *   Points to an array of *nb_links* service priorities associated with each
 *   event queue link to the event port.
 *   The priority defines the event port's servicing priority for the event
 *   queue, which may be ignored by an implementation.
 *   The requested priority should be in the range of
 *   [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
 *   The implementation shall normalize the requested priority to an
 *   implementation-supported priority value.
 *   A NULL value is allowed, in which case this function links the event
 *   queues with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority.
 *
 * @param nb_links
 *   The number of links to establish. This parameter is ignored if queues is
 *   NULL.
 *
 * @return
 *   The number of links actually established. The return value can be less
 *   than the value of the *nb_links* parameter when the implementation has a
 *   limitation on specific queue-to-port link establishment or if invalid
 *   parameters are specified in *queues*.
 *   If the return value is less than *nb_links*, the remaining links at the
 *   end of queues[] are not established, and the caller has to take care of
 *   them.
 *   If the return value is less than *nb_links*, the implementation shall
 *   update rte_errno accordingly. Possible rte_errno values are:
 *   (-EDQUOT) Quota exceeded (application tried to link a queue configured
 *   with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
 *   (-EINVAL) Invalid parameter
 *
 */
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links);

/**
 * Unlink multiple source event queues supplied in *queues* from the
 * destination event port designated by its *port_id* on the event device
 * designated by its *dev_id*.
 *
 * The unlink call issues an async request to disable the event port *port_id*
 * from receiving events from the specified event queue(s).
 * Event queue(s) to event port links can be removed at runtime without
 * re-configuring the device.
 *
 * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param port_id
 *   Event port identifier to select the destination port to unlink.
 *
 * @param queues
 *   Points to an array of *nb_unlinks* event queues to be unlinked
 *   from the event port.
 *   A NULL value is allowed, in which case this function unlinks all the
 *   event queue(s) from the event port *port_id*.
 *
 * @param nb_unlinks
 *   The number of unlinks to perform. This parameter is ignored if queues is
 *   NULL.
 *
 * @return
 *   The number of unlinks successfully requested. The return value can be
 *   less than the value of the *nb_unlinks* parameter when the implementation
 *   has a limitation on specific queue-to-port unlinks, or if invalid
 *   parameters are specified.
 *   If the return value is less than *nb_unlinks*, the remaining queues at
 *   the end of queues[] are not unlinked, and the caller has to take care of
 *   them.
 *   If the return value is less than *nb_unlinks*, the implementation shall
 *   update rte_errno accordingly. Possible rte_errno values are:
 *   (-EINVAL) Invalid parameter
 */
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks);
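
/*
 * Illustrative sketch, not part of the API: link two queues with different
 * service priorities, then unlink one at runtime and poll for the async
 * unlink to complete with rte_event_port_unlinks_in_progress(), documented
 * below. dev_id, port_id and handle_link_failure() are placeholders;
 * rte_pause() comes from rte_pause.h.
 *
 * @code
 *	uint8_t queues[] = { 0, 1 };
 *	uint8_t priorities[] = { RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *				 RTE_EVENT_DEV_PRIORITY_NORMAL };
 *	int nb;
 *
 *	nb = rte_event_port_link(dev_id, port_id, queues, priorities, 2);
 *	if (nb < 2)
 *		handle_link_failure(nb);	// inspect rte_errno
 *
 *	// Later: stop receiving from queue 1 and wait for completion.
 *	rte_event_port_unlink(dev_id, port_id, &queues[1], 1);
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 * @endcode
 */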

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Returns the number of unlinks in progress.
 *
 * This function provides the application with a method to detect when an
 * unlink has been completed by the implementation.
 *
 * @see rte_event_port_unlink() to issue unlink requests.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param port_id
 *   Event port identifier to select port to check for unlinks in progress.
 *
 * @return
 *   The number of unlinks that are in progress. A return of zero indicates
 *   that there are no outstanding unlink requests. A positive return value
 *   indicates the number of unlinks that are in progress, but are not yet
 *   complete.
 *   A negative return value indicates an error, -EINVAL indicates an invalid
 *   parameter passed for *dev_id* or *port_id*.
 */
int __rte_experimental
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);

/**
 * Retrieve the list of source event queues and their associated service
 * priorities linked to the destination event port designated by its
 * *port_id* on the event device designated by its *dev_id*.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param port_id
 *   Event port identifier.
 *
 * @param[out] queues
 *   Points to an array of *queues* for output.
 *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
 *   store the event queue(s) linked with the event port *port_id*.
 *
 * @param[out] priorities
 *   Points to an array of *priorities* for output.
 *   The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
 *   store the service priority associated with each event queue linked.
 *
 * @return
 *   The number of links established on the event port designated by its
 *   *port_id*.
 *   - <0 on failure.
 *
 */
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[]);

/**
 * Retrieve the service ID of the event dev. If the event dev doesn't use an
 * rte_service function, this function returns -ESRCH.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param [out] service_id
 *   A pointer to a uint32_t, to be filled in with the service id.
 *
 * @return
 *   - 0: Success
 *   - <0: Error code on failure; if the event dev doesn't use an rte_service
 *     function, this function returns -ESRCH.
 */
int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
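
/*
 * Illustrative sketch, not part of the API, assuming a software eventdev
 * whose scheduling runs as an rte_service: retrieve the service ID and run it
 * on a service lcore. service_lcore is a placeholder, the sketch assumes the
 * lcore was already added with rte_service_lcore_add() and started, and the
 * rte_service_* calls come from rte_service.h.
 *
 * @code
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, service_lcore, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 * @endcode
 */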

/**
 * Dump internal information about *dev_id* to the FILE* provided in *f*.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param f
 *   A pointer to a file for output.
 *
 * @return
 *   - 0: on success
 *   - <0: on failure.
 */
int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

/** Maximum name length for extended statistics counters */
#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

/**
 * Selects the component of the eventdev to retrieve statistics from.
 */
enum rte_event_dev_xstats_mode {
	RTE_EVENT_DEV_XSTATS_DEVICE,
	RTE_EVENT_DEV_XSTATS_PORT,
	RTE_EVENT_DEV_XSTATS_QUEUE,
};

/**
 * A name-key lookup element for extended statistics.
 *
 * This structure is used to map between names and ID numbers
 * for extended eventdev statistics.
 */
struct rte_event_dev_xstats_name {
	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

/**
 * Retrieve names of extended statistics of an event device.
 *
 * @param dev_id
 *   The identifier of the event device.
 * @param mode
 *   The mode of statistics to retrieve. Choices include the device
 *   statistics, port statistics or queue statistics.
 * @param queue_port_id
 *   Used to specify the port or queue number in queue or port mode, and is
 *   ignored in device mode.
 * @param[out] xstats_names
 *   Block of memory to insert names into. Must be at least *size* in
 *   capacity. If set to NULL, the function returns the required capacity.
 * @param[out] ids
 *   Block of memory to insert ids into. Must be at least *size* in capacity.
 *   If set to NULL, the function returns the required capacity. The id values
 *   returned can be passed to *rte_event_dev_xstats_get* to select
 *   statistics.
 * @param size
 *   Capacity of xstats_names (number of names).
 * @return
 *   - positive value lower or equal to size: success. The return value
 *     is the number of entries filled in the stats table.
 *   - positive value higher than size: error, the given statistics table
 *     is too small. The return value corresponds to the size that should
 *     be given to succeed. The entries in the table are not valid and
 *     shall not be used by the caller.
 *   - negative value on error:
 *        -ENODEV for invalid *dev_id*
 *        -EINVAL for invalid mode, queue port or id parameters
 *        -ENOTSUP if the device doesn't support this function.
 */
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
			       enum rte_event_dev_xstats_mode mode,
			       uint8_t queue_port_id,
			       struct rte_event_dev_xstats_name *xstats_names,
			       unsigned int *ids,
			       unsigned int size);

/**
 * Retrieve extended statistics of an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param mode
 *   The mode of statistics to retrieve. Choices include the device
 *   statistics, port statistics or queue statistics.
 * @param queue_port_id
 *   Used to specify the port or queue number in queue or port mode, and is
 *   ignored in device mode.
 * @param ids
 *   The id numbers of the stats to get. The ids can be taken from the stat
 *   position in the stat list from rte_event_dev_xstats_names_get(), or
 *   by using rte_event_dev_xstats_by_name_get().
 * @param[out] values
 *   The values for each stat requested by ID.
 * @param n
 *   The number of stats requested.
 * @return
 *   - positive value: number of stat entries filled into the values array
 *   - negative value on error:
 *        -ENODEV for invalid *dev_id*
 *        -EINVAL for invalid mode, queue port or id parameters
 *        -ENOTSUP if the device doesn't support this function.
 */
int
rte_event_dev_xstats_get(uint8_t dev_id,
			 enum rte_event_dev_xstats_mode mode,
			 uint8_t queue_port_id,
			 const unsigned int ids[],
			 uint64_t values[], unsigned int n);

/**
 * Retrieve the value of a single stat by requesting it by name.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param name
 *   The stat name to retrieve.
 * @param[out] id
 *   If non-NULL, the numerical id of the stat will be returned, so that
 *   further requests for the stat can be made using rte_event_dev_xstats_get,
 *   which will be faster as it doesn't need to scan a list of names for the
 *   stat. If the stat cannot be found, the id returned will be (unsigned)-1.
 * @return
 *   - positive value or zero: the stat value
 *   - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
 */
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
				 unsigned int *id);
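
/*
 * Illustrative sketch, not part of the API: the two-call pattern for reading
 * device-level xstats. The first call with NULL output arrays returns the
 * required capacity; the second call fills names and ids, which are then
 * passed to rte_event_dev_xstats_get(). dev_id is a placeholder, and the
 * sketch assumes the usual stdlib.h/inttypes.h includes.
 *
 * @code
 *	struct rte_event_dev_xstats_name *names;
 *	unsigned int *ids;
 *	uint64_t *values;
 *	int num, i;
 *
 *	num = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	if (num <= 0)
 *		return;	// no stats, or not supported by this device
 *	names = malloc(num * sizeof(*names));
 *	ids = malloc(num * sizeof(*ids));
 *	values = malloc(num * sizeof(*values));
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *				       0, names, ids, num);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				 ids, values, num);
 *	for (i = 0; i < num; i++)
 *		printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
 * @endcode
 */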

/**
 * Reset the values of the xstats of the selected component in the device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param mode
 *   The mode of the statistics to reset. Choose from device, queue or port.
 * @param queue_port_id
 *   The queue or port to reset. 0 and positive values select ports and
 *   queues, while -1 indicates all ports or queues.
 * @param ids
 *   Selects specific statistics to be reset. When NULL, all statistics
 *   selected by *mode* will be reset. If non-NULL, must point to an array of
 *   at least *nb_ids* size.
 * @param nb_ids
 *   The number of ids available from the *ids* array. Ignored when ids is
 *   NULL.
 * @return
 *   - zero: successfully reset the statistics to zero
 *   - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
 */
int
rte_event_dev_xstats_reset(uint8_t dev_id,
			   enum rte_event_dev_xstats_mode mode,
			   int16_t queue_port_id,
			   const uint32_t ids[],
			   uint32_t nb_ids);

/**
 * Trigger the eventdev self test.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   - 0: Selftest successful
 *   - -ENOTSUP if the device doesn't support selftest
 *   - other values < 0 on failure.
 */
int rte_event_dev_selftest(uint8_t dev_id);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */