1 /* 2 * BSD LICENSE 3 * 4 * Copyright 2016 Cavium, Inc. 5 * Copyright 2016 Intel Corporation. 6 * Copyright 2016 NXP. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Cavium, Inc nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #ifndef _RTE_EVENTDEV_H_ 36 #define _RTE_EVENTDEV_H_ 37 38 /** 39 * @file 40 * 41 * RTE Event Device API 42 * 43 * In a polling model, lcores poll ethdev ports and associated rx queues 44 * directly to look for packet. In an event driven model, by contrast, lcores 45 * call the scheduler that selects packets for them based on programmer 46 * specified criteria. Eventdev library adds support for event driven 47 * programming model, which offer applications automatic multicore scaling, 48 * dynamic load balancing, pipelining, packet ingress order maintenance and 49 * synchronization services to simplify application packet processing. 50 * 51 * The Event Device API is composed of two parts: 52 * 53 * - The application-oriented Event API that includes functions to setup 54 * an event device (configure it, setup its queues, ports and start it), to 55 * establish the link between queues to port and to receive events, and so on. 56 * 57 * - The driver-oriented Event API that exports a function allowing 58 * an event poll Mode Driver (PMD) to simultaneously register itself as 59 * an event device driver. 60 * 61 * Event device components: 62 * 63 * +-----------------+ 64 * | +-------------+ | 65 * +-------+ | | flow 0 | | 66 * |Packet | | +-------------+ | 67 * |event | | +-------------+ | 68 * | | | | flow 1 | |port_link(port0, queue0) 69 * +-------+ | +-------------+ | | +--------+ 70 * +-------+ | +-------------+ o-----v-----o |dequeue +------+ 71 * |Crypto | | | flow n | | | event +------->|Core 0| 72 * |work | | +-------------+ o----+ | port 0 | | | 73 * |done ev| | event queue 0 | | +--------+ +------+ 74 * +-------+ +-----------------+ | 75 * +-------+ | 76 * |Timer | +-----------------+ | +--------+ 77 * |expiry | | +-------------+ | +------o |dequeue +------+ 78 * |event | | | flow 0 | o-----------o event +------->|Core 1| 79 * +-------+ | +-------------+ | +----o port 1 | | | 80 * Event enqueue | +-------------+ | | +--------+ +------+ 81 * o-------------> | | flow 1 | | | 82 * enqueue( | +-------------+ | | 83 * queue_id, | | | +--------+ +------+ 84 * flow_id, | +-------------+ | | | |dequeue |Core 2| 85 * sched_type, | | flow n | o-----------o event +------->| | 86 * event_type, | +-------------+ | | | port 2 | +------+ 87 * subev_type, | event queue 1 | | +--------+ 88 * event) +-----------------+ | +--------+ 89 * | | |dequeue +------+ 90 * +-------+ +-----------------+ | | event +------->|Core n| 91 * |Core | | +-------------+ o-----------o port n | | | 92 * |(SW) | | | flow 0 | | | +--------+ +--+---+ 93 * |event | | +-------------+ | | | 94 * +-------+ | +-------------+ | | | 95 * ^ | | flow 1 | | | | 96 * | | +-------------+ o------+ | 97 * | | +-------------+ | | 98 * | | | flow n | | | 99 * | | +-------------+ | | 100 * | | event queue n | | 101 * | +-----------------+ | 102 * | | 103 * +-----------------------------------------------------------+ 104 * 105 * Event device: A hardware or software-based event scheduler. 106 * 107 * Event: A unit of scheduling that encapsulates a packet or other datatype 108 * like SW generated event from the CPU, Crypto work completion notification, 109 * Timer expiry event notification etc as well as metadata. 110 * The metadata includes flow ID, scheduling type, event priority, event_type, 111 * sub_event_type etc. 112 * 113 * Event queue: A queue containing events that are scheduled by the event dev. 114 * An event queue contains events of different flows associated with scheduling 115 * types, such as atomic, ordered, or parallel. 116 * 117 * Event port: An application's interface into the event dev for enqueue and 118 * dequeue operations. Each event port can be linked with one or more 119 * event queues for dequeue operations. 120 * 121 * By default, all the functions of the Event Device API exported by a PMD 122 * are lock-free functions which assume to not be invoked in parallel on 123 * different logical cores to work on the same target object. For instance, 124 * the dequeue function of a PMD cannot be invoked in parallel on two logical 125 * cores to operates on same event port. Of course, this function 126 * can be invoked in parallel by different logical cores on different ports. 127 * It is the responsibility of the upper level application to enforce this rule. 128 * 129 * In all functions of the Event API, the Event device is 130 * designated by an integer >= 0 named the device identifier *dev_id* 131 * 132 * At the Event driver level, Event devices are represented by a generic 133 * data structure of type *rte_event_dev*. 134 * 135 * Event devices are dynamically registered during the PCI/SoC device probing 136 * phase performed at EAL initialization time. 137 * When an Event device is being probed, a *rte_event_dev* structure and 138 * a new device identifier are allocated for that device. Then, the 139 * event_dev_init() function supplied by the Event driver matching the probed 140 * device is invoked to properly initialize the device. 141 * 142 * The role of the device init function consists of resetting the hardware or 143 * software event driver implementations. 144 * 145 * If the device init operation is successful, the correspondence between 146 * the device identifier assigned to the new device and its associated 147 * *rte_event_dev* structure is effectively registered. 148 * Otherwise, both the *rte_event_dev* structure and the device identifier are 149 * freed. 150 * 151 * The functions exported by the application Event API to setup a device 152 * designated by its device identifier must be invoked in the following order: 153 * - rte_event_dev_configure() 154 * - rte_event_queue_setup() 155 * - rte_event_port_setup() 156 * - rte_event_port_link() 157 * - rte_event_dev_start() 158 * 159 * Then, the application can invoke, in any order, the functions 160 * exported by the Event API to schedule events, dequeue events, enqueue events, 161 * change event queue(s) to event port [un]link establishment and so on. 162 * 163 * Application may use rte_event_[queue/port]_default_conf_get() to get the 164 * default configuration to set up an event queue or event port by 165 * overriding few default values. 166 * 167 * If the application wants to change the configuration (i.e. call 168 * rte_event_dev_configure(), rte_event_queue_setup(), or 169 * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the 170 * device and then do the reconfiguration before calling rte_event_dev_start() 171 * again. The schedule, enqueue and dequeue functions should not be invoked 172 * when the device is stopped. 173 * 174 * Finally, an application can close an Event device by invoking the 175 * rte_event_dev_close() function. 176 * 177 * Each function of the application Event API invokes a specific function 178 * of the PMD that controls the target device designated by its device 179 * identifier. 180 * 181 * For this purpose, all device-specific functions of an Event driver are 182 * supplied through a set of pointers contained in a generic structure of type 183 * *event_dev_ops*. 184 * The address of the *event_dev_ops* structure is stored in the *rte_event_dev* 185 * structure by the device init function of the Event driver, which is 186 * invoked during the PCI/SoC device probing phase, as explained earlier. 187 * 188 * In other words, each function of the Event API simply retrieves the 189 * *rte_event_dev* structure associated with the device identifier and 190 * performs an indirect invocation of the corresponding driver function 191 * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure. 192 * 193 * For performance reasons, the address of the fast-path functions of the 194 * Event driver is not contained in the *event_dev_ops* structure. 195 * Instead, they are directly stored at the beginning of the *rte_event_dev* 196 * structure to avoid an extra indirect memory access during their invocation. 197 * 198 * RTE event device drivers do not use interrupts for enqueue or dequeue 199 * operation. Instead, Event drivers export Poll-Mode enqueue and dequeue 200 * functions to applications. 201 * 202 * The events are injected to event device through *enqueue* operation by 203 * event producers in the system. The typical event producers are ethdev 204 * subsystem for generating packet events, CPU(SW) for generating events based 205 * on different stages of application processing, cryptodev for generating 206 * crypto work completion notification etc 207 * 208 * The *dequeue* operation gets one or more events from the event ports. 209 * The application process the events and send to downstream event queue through 210 * rte_event_enqueue_burst() if it is an intermediate stage of event processing, 211 * on the final stage, the application may send to different subsystem like 212 * ethdev to send the packet/event on the wire using ethdev 213 * rte_eth_tx_burst() API. 214 * 215 * The point at which events are scheduled to ports depends on the device. 216 * For hardware devices, scheduling occurs asynchronously without any software 217 * intervention. Software schedulers can either be distributed 218 * (each worker thread schedules events to its own port) or centralized 219 * (a dedicated thread schedules to all ports). Distributed software schedulers 220 * perform the scheduling in rte_event_dequeue_burst(), whereas centralized 221 * scheduler logic need a dedicated service core for scheduling. 222 * The RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set 223 * indicates the device is centralized and thus needs a dedicated scheduling 224 * thread that repeatedly calls software specific scheduling function. 225 * 226 * An event driven worker thread has following typical workflow on fastpath: 227 * \code{.c} 228 * while (1) { 229 * rte_event_dequeue_burst(...); 230 * (event processing) 231 * rte_event_enqueue_burst(...); 232 * } 233 * \endcode 234 * 235 */ 236 237 #ifdef __cplusplus 238 extern "C" { 239 #endif 240 241 #include <rte_common.h> 242 #include <rte_config.h> 243 #include <rte_memory.h> 244 #include <rte_errno.h> 245 246 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */ 247 248 /* Event device capability bitmap flags */ 249 #define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0) 250 /**< Event scheduling prioritization is based on the priority associated with 251 * each event queue. 252 * 253 * @see rte_event_queue_setup() 254 */ 255 #define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1) 256 /**< Event scheduling prioritization is based on the priority associated with 257 * each event. Priority of each event is supplied in *rte_event* structure 258 * on each enqueue operation. 259 * 260 * @see rte_event_enqueue_burst() 261 */ 262 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2) 263 /**< Event device operates in distributed scheduling mode. 264 * In distributed scheduling mode, event scheduling happens in HW or 265 * rte_event_dequeue_burst() or the combination of these two. 266 * If the flag is not set then eventdev is centralized and thus needs a 267 * dedicated service core that acts as a scheduling thread . 268 * 269 * @see rte_event_dequeue_burst() 270 */ 271 #define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3) 272 /**< Event device is capable of enqueuing events of any type to any queue. 273 * If this capability is not set, the queue only supports events of the 274 * *RTE_SCHED_TYPE_* type that it was created with. 275 * 276 * @see RTE_SCHED_TYPE_* values 277 */ 278 #define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4) 279 /**< Event device is capable of operating in burst mode for enqueue(forward, 280 * release) and dequeue operation. If this capability is not set, application 281 * still uses the rte_event_dequeue_burst() and rte_event_enqueue_burst() but 282 * PMD accepts only one event at a time. 283 * 284 * @see rte_event_dequeue_burst() rte_event_enqueue_burst() 285 */ 286 287 /* Event device priority levels */ 288 #define RTE_EVENT_DEV_PRIORITY_HIGHEST 0 289 /**< Highest priority expressed across eventdev subsystem 290 * @see rte_event_queue_setup(), rte_event_enqueue_burst() 291 * @see rte_event_port_link() 292 */ 293 #define RTE_EVENT_DEV_PRIORITY_NORMAL 128 294 /**< Normal priority expressed across eventdev subsystem 295 * @see rte_event_queue_setup(), rte_event_enqueue_burst() 296 * @see rte_event_port_link() 297 */ 298 #define RTE_EVENT_DEV_PRIORITY_LOWEST 255 299 /**< Lowest priority expressed across eventdev subsystem 300 * @see rte_event_queue_setup(), rte_event_enqueue_burst() 301 * @see rte_event_port_link() 302 */ 303 304 /** 305 * Get the total number of event devices that have been successfully 306 * initialised. 307 * 308 * @return 309 * The total number of usable event devices. 310 */ 311 uint8_t 312 rte_event_dev_count(void); 313 314 /** 315 * Get the device identifier for the named event device. 316 * 317 * @param name 318 * Event device name to select the event device identifier. 319 * 320 * @return 321 * Returns event device identifier on success. 322 * - <0: Failure to find named event device. 323 */ 324 int 325 rte_event_dev_get_dev_id(const char *name); 326 327 /** 328 * Return the NUMA socket to which a device is connected. 329 * 330 * @param dev_id 331 * The identifier of the device. 332 * @return 333 * The NUMA socket id to which the device is connected or 334 * a default of zero if the socket could not be determined. 335 * -(-EINVAL) dev_id value is out of range. 336 */ 337 int 338 rte_event_dev_socket_id(uint8_t dev_id); 339 340 /** 341 * Event device information 342 */ 343 struct rte_event_dev_info { 344 const char *driver_name; /**< Event driver name */ 345 struct rte_device *dev; /**< Device information */ 346 uint32_t min_dequeue_timeout_ns; 347 /**< Minimum supported global dequeue timeout(ns) by this device */ 348 uint32_t max_dequeue_timeout_ns; 349 /**< Maximum supported global dequeue timeout(ns) by this device */ 350 uint32_t dequeue_timeout_ns; 351 /**< Configured global dequeue timeout(ns) for this device */ 352 uint8_t max_event_queues; 353 /**< Maximum event_queues supported by this device */ 354 uint32_t max_event_queue_flows; 355 /**< Maximum supported flows in an event queue by this device*/ 356 uint8_t max_event_queue_priority_levels; 357 /**< Maximum number of event queue priority levels by this device. 358 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability 359 */ 360 uint8_t max_event_priority_levels; 361 /**< Maximum number of event priority levels by this device. 362 * Valid when the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability 363 */ 364 uint8_t max_event_ports; 365 /**< Maximum number of event ports supported by this device */ 366 uint8_t max_event_port_dequeue_depth; 367 /**< Maximum number of events can be dequeued at a time from an 368 * event port by this device. 369 * A device that does not support bulk dequeue will set this as 1. 370 */ 371 uint32_t max_event_port_enqueue_depth; 372 /**< Maximum number of events can be enqueued at a time from an 373 * event port by this device. 374 * A device that does not support bulk enqueue will set this as 1. 375 */ 376 int32_t max_num_events; 377 /**< A *closed system* event dev has a limit on the number of events it 378 * can manage at a time. An *open system* event dev does not have a 379 * limit and will specify this as -1. 380 */ 381 uint32_t event_dev_cap; 382 /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/ 383 }; 384 385 /** 386 * Retrieve the contextual information of an event device. 387 * 388 * @param dev_id 389 * The identifier of the device. 390 * 391 * @param[out] dev_info 392 * A pointer to a structure of type *rte_event_dev_info* to be filled with the 393 * contextual information of the device. 394 * 395 * @return 396 * - 0: Success, driver updates the contextual information of the event device 397 * - <0: Error code returned by the driver info get function. 398 * 399 */ 400 int 401 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info); 402 403 /** 404 * The count of ports. 405 */ 406 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0 407 /** 408 * The count of queues. 409 */ 410 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1 411 /** 412 * The status of the device, zero for stopped, non-zero for started. 413 */ 414 #define RTE_EVENT_DEV_ATTR_STARTED 2 415 416 /** 417 * Get an attribute from a device. 418 * 419 * @param dev_id Eventdev id 420 * @param attr_id The attribute ID to retrieve 421 * @param[out] attr_value A pointer that will be filled in with the attribute 422 * value if successful. 423 * 424 * @return 425 * - 0: Successfully retrieved attribute value 426 * - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL 427 */ 428 int 429 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id, 430 uint32_t *attr_value); 431 432 433 /* Event device configuration bitmap flags */ 434 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0) 435 /**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns. 436 * @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst() 437 */ 438 439 /** Event device configuration structure */ 440 struct rte_event_dev_config { 441 uint32_t dequeue_timeout_ns; 442 /**< rte_event_dequeue_burst() timeout on this device. 443 * This value should be in the range of *min_dequeue_timeout_ns* and 444 * *max_dequeue_timeout_ns* which previously provided in 445 * rte_event_dev_info_get() 446 * The value 0 is allowed, in which case, default dequeue timeout used. 447 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT 448 */ 449 int32_t nb_events_limit; 450 /**< In a *closed system* this field is the limit on maximum number of 451 * events that can be inflight in the eventdev at a given time. The 452 * limit is required to ensure that the finite space in a closed system 453 * is not overwhelmed. The value cannot exceed the *max_num_events* 454 * as provided by rte_event_dev_info_get(). 455 * This value should be set to -1 for *open system*. 456 */ 457 uint8_t nb_event_queues; 458 /**< Number of event queues to configure on this device. 459 * This value cannot exceed the *max_event_queues* which previously 460 * provided in rte_event_dev_info_get() 461 */ 462 uint8_t nb_event_ports; 463 /**< Number of event ports to configure on this device. 464 * This value cannot exceed the *max_event_ports* which previously 465 * provided in rte_event_dev_info_get() 466 */ 467 uint32_t nb_event_queue_flows; 468 /**< Number of flows for any event queue on this device. 469 * This value cannot exceed the *max_event_queue_flows* which previously 470 * provided in rte_event_dev_info_get() 471 */ 472 uint32_t nb_event_port_dequeue_depth; 473 /**< Maximum number of events can be dequeued at a time from an 474 * event port by this device. 475 * This value cannot exceed the *max_event_port_dequeue_depth* 476 * which previously provided in rte_event_dev_info_get(). 477 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable. 478 * @see rte_event_port_setup() 479 */ 480 uint32_t nb_event_port_enqueue_depth; 481 /**< Maximum number of events can be enqueued at a time from an 482 * event port by this device. 483 * This value cannot exceed the *max_event_port_enqueue_depth* 484 * which previously provided in rte_event_dev_info_get(). 485 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable. 486 * @see rte_event_port_setup() 487 */ 488 uint32_t event_dev_cfg; 489 /**< Event device config flags(RTE_EVENT_DEV_CFG_)*/ 490 }; 491 492 /** 493 * Configure an event device. 494 * 495 * This function must be invoked first before any other function in the 496 * API. This function can also be re-invoked when a device is in the 497 * stopped state. 498 * 499 * The caller may use rte_event_dev_info_get() to get the capability of each 500 * resources available for this event device. 501 * 502 * @param dev_id 503 * The identifier of the device to configure. 504 * @param dev_conf 505 * The event device configuration structure. 506 * 507 * @return 508 * - 0: Success, device configured. 509 * - <0: Error code returned by the driver configuration function. 510 */ 511 int 512 rte_event_dev_configure(uint8_t dev_id, 513 const struct rte_event_dev_config *dev_conf); 514 515 516 /* Event queue specific APIs */ 517 518 /* Event queue configuration bitmap flags */ 519 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0) 520 /**< Allow ATOMIC,ORDERED,PARALLEL schedule type enqueue 521 * 522 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL 523 * @see rte_event_enqueue_burst() 524 */ 525 #define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1) 526 /**< This event queue links only to a single event port. 527 * 528 * @see rte_event_port_setup(), rte_event_port_link() 529 */ 530 531 /** Event queue configuration structure */ 532 struct rte_event_queue_conf { 533 uint32_t nb_atomic_flows; 534 /**< The maximum number of active flows this queue can track at any 535 * given time. If the queue is configured for atomic scheduling (by 536 * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg 537 * or RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the 538 * value must be in the range of [1, nb_event_queue_flows], which was 539 * previously provided in rte_event_dev_configure(). 540 */ 541 uint32_t nb_atomic_order_sequences; 542 /**< The maximum number of outstanding events waiting to be 543 * reordered by this queue. In other words, the number of entries in 544 * this queue’s reorder buffer.When the number of events in the 545 * reorder buffer reaches to *nb_atomic_order_sequences* then the 546 * scheduler cannot schedule the events from this queue and invalid 547 * event will be returned from dequeue until one or more entries are 548 * freed up/released. 549 * If the queue is configured for ordered scheduling (by applying the 550 * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or 551 * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must 552 * be in the range of [1, nb_event_queue_flows], which was 553 * previously supplied to rte_event_dev_configure(). 554 */ 555 uint32_t event_queue_cfg; 556 /**< Queue cfg flags(EVENT_QUEUE_CFG_) */ 557 uint8_t schedule_type; 558 /**< Queue schedule type(RTE_SCHED_TYPE_*). 559 * Valid when RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in 560 * event_queue_cfg. 561 */ 562 uint8_t priority; 563 /**< Priority for this event queue relative to other event queues. 564 * The requested priority should in the range of 565 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST]. 566 * The implementation shall normalize the requested priority to 567 * event device supported priority value. 568 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability 569 */ 570 }; 571 572 /** 573 * Retrieve the default configuration information of an event queue designated 574 * by its *queue_id* from the event driver for an event device. 575 * 576 * This function intended to be used in conjunction with rte_event_queue_setup() 577 * where caller needs to set up the queue by overriding few default values. 578 * 579 * @param dev_id 580 * The identifier of the device. 581 * @param queue_id 582 * The index of the event queue to get the configuration information. 583 * The value must be in the range [0, nb_event_queues - 1] 584 * previously supplied to rte_event_dev_configure(). 585 * @param[out] queue_conf 586 * The pointer to the default event queue configuration data. 587 * @return 588 * - 0: Success, driver updates the default event queue configuration data. 589 * - <0: Error code returned by the driver info get function. 590 * 591 * @see rte_event_queue_setup() 592 * 593 */ 594 int 595 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id, 596 struct rte_event_queue_conf *queue_conf); 597 598 /** 599 * Allocate and set up an event queue for an event device. 600 * 601 * @param dev_id 602 * The identifier of the device. 603 * @param queue_id 604 * The index of the event queue to setup. The value must be in the range 605 * [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure(). 606 * @param queue_conf 607 * The pointer to the configuration data to be used for the event queue. 608 * NULL value is allowed, in which case default configuration used. 609 * 610 * @see rte_event_queue_default_conf_get() 611 * 612 * @return 613 * - 0: Success, event queue correctly set up. 614 * - <0: event queue configuration failed 615 */ 616 int 617 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id, 618 const struct rte_event_queue_conf *queue_conf); 619 620 /** 621 * The priority of the queue. 622 */ 623 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0 624 /** 625 * The number of atomic flows configured for the queue. 626 */ 627 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1 628 /** 629 * The number of atomic order sequences configured for the queue. 630 */ 631 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2 632 /** 633 * The cfg flags for the queue. 634 */ 635 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3 636 /** 637 * The schedule type of the queue. 638 */ 639 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4 640 641 /** 642 * Get an attribute from a queue. 643 * 644 * @param dev_id 645 * Eventdev id 646 * @param queue_id 647 * Eventdev queue id 648 * @param attr_id 649 * The attribute ID to retrieve 650 * @param[out] attr_value 651 * A pointer that will be filled in with the attribute value if successful 652 * 653 * @return 654 * - 0: Successfully returned value 655 * - -EINVAL: invalid device, queue or attr_id provided, or attr_value was 656 * NULL 657 * - -EOVERFLOW: returned when attr_id is set to 658 * RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to 659 * RTE_EVENT_QUEUE_CFG_ALL_TYPES 660 */ 661 int 662 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id, 663 uint32_t *attr_value); 664 665 /* Event port specific APIs */ 666 667 /** Event port configuration structure */ 668 struct rte_event_port_conf { 669 int32_t new_event_threshold; 670 /**< A backpressure threshold for new event enqueues on this port. 671 * Use for *closed system* event dev where event capacity is limited, 672 * and cannot exceed the capacity of the event dev. 673 * Configuring ports with different thresholds can make higher priority 674 * traffic less likely to be backpressured. 675 * For example, a port used to inject NIC Rx packets into the event dev 676 * can have a lower threshold so as not to overwhelm the device, 677 * while ports used for worker pools can have a higher threshold. 678 * This value cannot exceed the *nb_events_limit* 679 * which was previously supplied to rte_event_dev_configure(). 680 * This should be set to '-1' for *open system*. 681 */ 682 uint16_t dequeue_depth; 683 /**< Configure number of bulk dequeues for this event port. 684 * This value cannot exceed the *nb_event_port_dequeue_depth* 685 * which previously supplied to rte_event_dev_configure(). 686 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable. 687 */ 688 uint16_t enqueue_depth; 689 /**< Configure number of bulk enqueues for this event port. 690 * This value cannot exceed the *nb_event_port_enqueue_depth* 691 * which previously supplied to rte_event_dev_configure(). 692 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable. 693 */ 694 }; 695 696 /** 697 * Retrieve the default configuration information of an event port designated 698 * by its *port_id* from the event driver for an event device. 699 * 700 * This function intended to be used in conjunction with rte_event_port_setup() 701 * where caller needs to set up the port by overriding few default values. 702 * 703 * @param dev_id 704 * The identifier of the device. 705 * @param port_id 706 * The index of the event port to get the configuration information. 707 * The value must be in the range [0, nb_event_ports - 1] 708 * previously supplied to rte_event_dev_configure(). 709 * @param[out] port_conf 710 * The pointer to the default event port configuration data 711 * @return 712 * - 0: Success, driver updates the default event port configuration data. 713 * - <0: Error code returned by the driver info get function. 714 * 715 * @see rte_event_port_setup() 716 * 717 */ 718 int 719 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id, 720 struct rte_event_port_conf *port_conf); 721 722 /** 723 * Allocate and set up an event port for an event device. 724 * 725 * @param dev_id 726 * The identifier of the device. 727 * @param port_id 728 * The index of the event port to setup. The value must be in the range 729 * [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure(). 730 * @param port_conf 731 * The pointer to the configuration data to be used for the queue. 732 * NULL value is allowed, in which case default configuration used. 733 * 734 * @see rte_event_port_default_conf_get() 735 * 736 * @return 737 * - 0: Success, event port correctly set up. 738 * - <0: Port configuration failed 739 * - (-EDQUOT) Quota exceeded(Application tried to link the queue configured 740 * with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event ports) 741 */ 742 int 743 rte_event_port_setup(uint8_t dev_id, uint8_t port_id, 744 const struct rte_event_port_conf *port_conf); 745 746 /** 747 * The queue depth of the port on the enqueue side 748 */ 749 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0 750 /** 751 * The queue depth of the port on the dequeue side 752 */ 753 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1 754 /** 755 * The new event threshold of the port 756 */ 757 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2 758 759 /** 760 * Get an attribute from a port. 761 * 762 * @param dev_id 763 * Eventdev id 764 * @param port_id 765 * Eventdev port id 766 * @param attr_id 767 * The attribute ID to retrieve 768 * @param[out] attr_value 769 * A pointer that will be filled in with the attribute value if successful 770 * 771 * @return 772 * - 0: Successfully returned value 773 * - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL 774 */ 775 int 776 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id, 777 uint32_t *attr_value); 778 779 /** 780 * Start an event device. 781 * 782 * The device start step is the last one and consists of setting the event 783 * queues to start accepting the events and schedules to event ports. 784 * 785 * On success, all basic functions exported by the API (event enqueue, 786 * event dequeue and so on) can be invoked. 787 * 788 * @param dev_id 789 * Event device identifier 790 * @return 791 * - 0: Success, device started. 792 * - -ESTALE : Not all ports of the device are configured 793 * - -ENOLINK: Not all queues are linked, which could lead to deadlock. 794 */ 795 int 796 rte_event_dev_start(uint8_t dev_id); 797 798 /** 799 * Stop an event device. The device can be restarted with a call to 800 * rte_event_dev_start() 801 * 802 * @param dev_id 803 * Event device identifier. 804 */ 805 void 806 rte_event_dev_stop(uint8_t dev_id); 807 808 /** 809 * Close an event device. The device cannot be restarted! 810 * 811 * @param dev_id 812 * Event device identifier 813 * 814 * @return 815 * - 0 on successfully closing device 816 * - <0 on failure to close device 817 * - (-EAGAIN) if device is busy 818 */ 819 int 820 rte_event_dev_close(uint8_t dev_id); 821 822 /* Scheduler type definitions */ 823 #define RTE_SCHED_TYPE_ORDERED 0 824 /**< Ordered scheduling 825 * 826 * Events from an ordered flow of an event queue can be scheduled to multiple 827 * ports for concurrent processing while maintaining the original event order. 828 * This scheme enables the user to achieve high single flow throughput by 829 * avoiding SW synchronization for ordering between ports which bound to cores. 830 * 831 * The source flow ordering from an event queue is maintained when events are 832 * enqueued to their destination queue within the same ordered flow context. 833 * An event port holds the context until application call 834 * rte_event_dequeue_burst() from the same port, which implicitly releases 835 * the context. 836 * User may allow the scheduler to release the context earlier than that 837 * by invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE operation. 838 * 839 * Events from the source queue appear in their original order when dequeued 840 * from a destination queue. 841 * Event ordering is based on the received event(s), but also other 842 * (newly allocated or stored) events are ordered when enqueued within the same 843 * ordered context. Events not enqueued (e.g. released or stored) within the 844 * context are considered missing from reordering and are skipped at this time 845 * (but can be ordered again within another context). 846 * 847 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE 848 */ 849 850 #define RTE_SCHED_TYPE_ATOMIC 1 851 /**< Atomic scheduling 852 * 853 * Events from an atomic flow of an event queue can be scheduled only to a 854 * single port at a time. The port is guaranteed to have exclusive (atomic) 855 * access to the associated flow context, which enables the user to avoid SW 856 * synchronization. Atomic flows also help to maintain event ordering 857 * since only one port at a time can process events from a flow of an 858 * event queue. 859 * 860 * The atomic queue synchronization context is dedicated to the port until 861 * application call rte_event_dequeue_burst() from the same port, 862 * which implicitly releases the context. User may allow the scheduler to 863 * release the context earlier than that by invoking rte_event_enqueue_burst() 864 * with RTE_EVENT_OP_RELEASE operation. 865 * 866 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE 867 */ 868 869 #define RTE_SCHED_TYPE_PARALLEL 2 870 /**< Parallel scheduling 871 * 872 * The scheduler performs priority scheduling, load balancing, etc. functions 873 * but does not provide additional event synchronization or ordering. 874 * It is free to schedule events from a single parallel flow of an event queue 875 * to multiple events ports for concurrent processing. 876 * The application is responsible for flow context synchronization and 877 * event ordering (SW synchronization). 878 * 879 * @see rte_event_queue_setup(), rte_event_dequeue_burst() 880 */ 881 882 /* Event types to classify the event source */ 883 #define RTE_EVENT_TYPE_ETHDEV 0x0 884 /**< The event generated from ethdev subsystem */ 885 #define RTE_EVENT_TYPE_CRYPTODEV 0x1 886 /**< The event generated from crypodev subsystem */ 887 #define RTE_EVENT_TYPE_TIMERDEV 0x2 888 /**< The event generated from timerdev subsystem */ 889 #define RTE_EVENT_TYPE_CPU 0x3 890 /**< The event generated from cpu for pipelining. 891 * Application may use *sub_event_type* to further classify the event 892 */ 893 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4 894 /**< The event generated from event eth Rx adapter */ 895 #define RTE_EVENT_TYPE_MAX 0x10 896 /**< Maximum number of event types */ 897 898 /* Event enqueue operations */ 899 #define RTE_EVENT_OP_NEW 0 900 /**< The event producers use this operation to inject a new event to the 901 * event device. 902 */ 903 #define RTE_EVENT_OP_FORWARD 1 904 /**< The CPU use this operation to forward the event to different event queue or 905 * change to new application specific flow or schedule type to enable 906 * pipelining. 907 * 908 * This operation must only be enqueued to the same port that the 909 * event to be forwarded was dequeued from. 910 */ 911 #define RTE_EVENT_OP_RELEASE 2 912 /**< Release the flow context associated with the schedule type. 913 * 914 * If current flow's scheduler type method is *RTE_SCHED_TYPE_ATOMIC* 915 * then this function hints the scheduler that the user has completed critical 916 * section processing in the current atomic context. 917 * The scheduler is now allowed to schedule events from the same flow from 918 * an event queue to another port. However, the context may be still held 919 * until the next rte_event_dequeue_burst() call, this call allows but does not 920 * force the scheduler to release the context early. 921 * 922 * Early atomic context release may increase parallelism and thus system 923 * performance, but the user needs to design carefully the split into critical 924 * vs non-critical sections. 925 * 926 * If current flow's scheduler type method is *RTE_SCHED_TYPE_ORDERED* 927 * then this function hints the scheduler that the user has done all that need 928 * to maintain event order in the current ordered context. 929 * The scheduler is allowed to release the ordered context of this port and 930 * avoid reordering any following enqueues. 931 * 932 * Early ordered context release may increase parallelism and thus system 933 * performance. 934 * 935 * If current flow's scheduler type method is *RTE_SCHED_TYPE_PARALLEL* 936 * or no scheduling context is held then this function may be an NOOP, 937 * depending on the implementation. 938 * 939 * This operation must only be enqueued to the same port that the 940 * event to be released was dequeued from. 941 * 942 */ 943 944 /** 945 * The generic *rte_event* structure to hold the event attributes 946 * for dequeue and enqueue operation 947 */ 948 RTE_STD_C11 949 struct rte_event { 950 /** WORD0 */ 951 union { 952 uint64_t event; 953 /** Event attributes for dequeue or enqueue operation */ 954 struct { 955 uint32_t flow_id:20; 956 /**< Targeted flow identifier for the enqueue and 957 * dequeue operation. 958 * The value must be in the range of 959 * [0, nb_event_queue_flows - 1] which 960 * previously supplied to rte_event_dev_configure(). 961 */ 962 uint32_t sub_event_type:8; 963 /**< Sub-event types based on the event source. 964 * @see RTE_EVENT_TYPE_CPU 965 */ 966 uint32_t event_type:4; 967 /**< Event type to classify the event source. 968 * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*) 969 */ 970 uint8_t op:2; 971 /**< The type of event enqueue operation - new/forward/ 972 * etc.This field is not preserved across an instance 973 * and is undefined on dequeue. 974 * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*) 975 */ 976 uint8_t rsvd:4; 977 /**< Reserved for future use */ 978 uint8_t sched_type:2; 979 /**< Scheduler synchronization type (RTE_SCHED_TYPE_*) 980 * associated with flow id on a given event queue 981 * for the enqueue and dequeue operation. 982 */ 983 uint8_t queue_id; 984 /**< Targeted event queue identifier for the enqueue or 985 * dequeue operation. 986 * The value must be in the range of 987 * [0, nb_event_queues - 1] which previously supplied to 988 * rte_event_dev_configure(). 989 */ 990 uint8_t priority; 991 /**< Event priority relative to other events in the 992 * event queue. The requested priority should in the 993 * range of [RTE_EVENT_DEV_PRIORITY_HIGHEST, 994 * RTE_EVENT_DEV_PRIORITY_LOWEST]. 995 * The implementation shall normalize the requested 996 * priority to supported priority value. 997 * Valid when the device has 998 * RTE_EVENT_DEV_CAP_EVENT_QOS capability. 999 */ 1000 uint8_t impl_opaque; 1001 /**< Implementation specific opaque value. 1002 * An implementation may use this field to hold 1003 * implementation specific value to share between 1004 * dequeue and enqueue operation. 1005 * The application should not modify this field. 1006 */ 1007 }; 1008 }; 1009 /** WORD1 */ 1010 union { 1011 uint64_t u64; 1012 /**< Opaque 64-bit value */ 1013 void *event_ptr; 1014 /**< Opaque event pointer */ 1015 struct rte_mbuf *mbuf; 1016 /**< mbuf pointer if dequeued event is associated with mbuf */ 1017 }; 1018 }; 1019 1020 /* Ethdev Rx adapter capability bitmap flags */ 1021 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1 1022 /**< This flag is sent when the packet transfer mechanism is in HW. 1023 * Ethdev can send packets to the event device using internal event port. 1024 */ 1025 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2 1026 /**< Adapter supports multiple event queues per ethdev. Every ethdev 1027 * Rx queue can be connected to a unique event queue. 1028 */ 1029 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4 1030 /**< The application can override the adapter generated flow ID in the 1031 * event. This flow ID can be specified when adding an ethdev Rx queue 1032 * to the adapter using the ev member of struct rte_event_eth_rx_adapter 1033 * @see struct rte_event_eth_rx_adapter_queue_conf::ev 1034 * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags 1035 */ 1036 1037 /** 1038 * Retrieve the event device's ethdev Rx adapter capabilities for the 1039 * specified ethernet port 1040 * 1041 * @param dev_id 1042 * The identifier of the device. 1043 * 1044 * @param eth_port_id 1045 * The identifier of the ethernet device. 1046 * 1047 * @param[out] caps 1048 * A pointer to memory filled with Rx event adapter capabilities. 1049 * 1050 * @return 1051 * - 0: Success, driver provides Rx event adapter capabilities for the 1052 * ethernet device. 1053 * - <0: Error code returned by the driver function. 1054 * 1055 */ 1056 int 1057 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id, 1058 uint32_t *caps); 1059 1060 struct rte_eventdev_driver; 1061 struct rte_eventdev_ops; 1062 struct rte_eventdev; 1063 1064 typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev); 1065 /**< @internal Enqueue event on port of a device */ 1066 1067 typedef uint16_t (*event_enqueue_burst_t)(void *port, 1068 const struct rte_event ev[], uint16_t nb_events); 1069 /**< @internal Enqueue burst of events on port of a device */ 1070 1071 typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev, 1072 uint64_t timeout_ticks); 1073 /**< @internal Dequeue event from port of a device */ 1074 1075 typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[], 1076 uint16_t nb_events, uint64_t timeout_ticks); 1077 /**< @internal Dequeue burst of events from port of a device */ 1078 1079 #define RTE_EVENTDEV_NAME_MAX_LEN (64) 1080 /**< @internal Max length of name of event PMD */ 1081 1082 /** 1083 * @internal 1084 * The data part, with no function pointers, associated with each device. 1085 * 1086 * This structure is safe to place in shared memory to be common among 1087 * different processes in a multi-process configuration. 1088 */ 1089 struct rte_eventdev_data { 1090 int socket_id; 1091 /**< Socket ID where memory is allocated */ 1092 uint8_t dev_id; 1093 /**< Device ID for this instance */ 1094 uint8_t nb_queues; 1095 /**< Number of event queues. */ 1096 uint8_t nb_ports; 1097 /**< Number of event ports. */ 1098 void **ports; 1099 /**< Array of pointers to ports. */ 1100 struct rte_event_port_conf *ports_cfg; 1101 /**< Array of port configuration structures. */ 1102 struct rte_event_queue_conf *queues_cfg; 1103 /**< Array of queue configuration structures. */ 1104 uint16_t *links_map; 1105 /**< Memory to store queues to port connections. */ 1106 void *dev_private; 1107 /**< PMD-specific private data */ 1108 uint32_t event_dev_cap; 1109 /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/ 1110 struct rte_event_dev_config dev_conf; 1111 /**< Configuration applied to device. */ 1112 uint8_t service_inited; 1113 /* Service initialization state */ 1114 uint32_t service_id; 1115 /* Service ID*/ 1116 1117 RTE_STD_C11 1118 uint8_t dev_started : 1; 1119 /**< Device state: STARTED(1)/STOPPED(0) */ 1120 1121 char name[RTE_EVENTDEV_NAME_MAX_LEN]; 1122 /**< Unique identifier name */ 1123 } __rte_cache_aligned; 1124 1125 /** @internal The data structure associated with each event device. */ 1126 struct rte_eventdev { 1127 event_enqueue_t enqueue; 1128 /**< Pointer to PMD enqueue function. */ 1129 event_enqueue_burst_t enqueue_burst; 1130 /**< Pointer to PMD enqueue burst function. */ 1131 event_enqueue_burst_t enqueue_new_burst; 1132 /**< Pointer to PMD enqueue burst function(op new variant) */ 1133 event_enqueue_burst_t enqueue_forward_burst; 1134 /**< Pointer to PMD enqueue burst function(op forward variant) */ 1135 event_dequeue_t dequeue; 1136 /**< Pointer to PMD dequeue function. */ 1137 event_dequeue_burst_t dequeue_burst; 1138 /**< Pointer to PMD dequeue burst function. */ 1139 1140 struct rte_eventdev_data *data; 1141 /**< Pointer to device data */ 1142 const struct rte_eventdev_ops *dev_ops; 1143 /**< Functions exported by PMD */ 1144 struct rte_device *dev; 1145 /**< Device info. supplied by probing */ 1146 1147 RTE_STD_C11 1148 uint8_t attached : 1; 1149 /**< Flag indicating the device is attached */ 1150 } __rte_cache_aligned; 1151 1152 extern struct rte_eventdev *rte_eventdevs; 1153 /** @internal The pool of rte_eventdev structures. */ 1154 1155 static __rte_always_inline uint16_t 1156 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, 1157 const struct rte_event ev[], uint16_t nb_events, 1158 const event_enqueue_burst_t fn) 1159 { 1160 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1161 1162 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG 1163 if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) { 1164 rte_errno = -EINVAL; 1165 return 0; 1166 } 1167 1168 if (port_id >= dev->data->nb_ports) { 1169 rte_errno = -EINVAL; 1170 return 0; 1171 } 1172 #endif 1173 /* 1174 * Allow zero cost non burst mode routine invocation if application 1175 * requests nb_events as const one 1176 */ 1177 if (nb_events == 1) 1178 return (*dev->enqueue)(dev->data->ports[port_id], ev); 1179 else 1180 return fn(dev->data->ports[port_id], ev, nb_events); 1181 } 1182 1183 /** 1184 * Enqueue a burst of events objects or an event object supplied in *rte_event* 1185 * structure on an event device designated by its *dev_id* through the event 1186 * port specified by *port_id*. Each event object specifies the event queue on 1187 * which it will be enqueued. 1188 * 1189 * The *nb_events* parameter is the number of event objects to enqueue which are 1190 * supplied in the *ev* array of *rte_event* structure. 1191 * 1192 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be 1193 * enqueued to the same port that their associated events were dequeued from. 1194 * 1195 * The rte_event_enqueue_burst() function returns the number of 1196 * events objects it actually enqueued. A return value equal to *nb_events* 1197 * means that all event objects have been enqueued. 1198 * 1199 * @param dev_id 1200 * The identifier of the device. 1201 * @param port_id 1202 * The identifier of the event port. 1203 * @param ev 1204 * Points to an array of *nb_events* objects of type *rte_event* structure 1205 * which contain the event object enqueue operations to be processed. 1206 * @param nb_events 1207 * The number of event objects to enqueue, typically number of 1208 * rte_event_port_enqueue_depth() available for this port. 1209 * 1210 * @return 1211 * The number of event objects actually enqueued on the event device. The 1212 * return value can be less than the value of the *nb_events* parameter when 1213 * the event devices queue is full or if invalid parameters are specified in a 1214 * *rte_event*. If the return value is less than *nb_events*, the remaining 1215 * events at the end of ev[] are not consumed and the caller has to take care 1216 * of them, and rte_errno is set accordingly. Possible errno values include: 1217 * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue 1218 * ID is invalid, or an event's sched type doesn't match the 1219 * capabilities of the destination queue. 1220 * - -ENOSPC The event port was backpressured and unable to enqueue 1221 * one or more events. This error code is only applicable to 1222 * closed systems. 1223 * @see rte_event_port_enqueue_depth() 1224 */ 1225 static inline uint16_t 1226 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, 1227 const struct rte_event ev[], uint16_t nb_events) 1228 { 1229 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1230 1231 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events, 1232 dev->enqueue_burst); 1233 } 1234 1235 /** 1236 * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on 1237 * an event device designated by its *dev_id* through the event port specified 1238 * by *port_id*. 1239 * 1240 * Provides the same functionality as rte_event_enqueue_burst(), expect that 1241 * application can use this API when the all objects in the burst contains 1242 * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized 1243 * function can provide the additional hint to the PMD and optimize if possible. 1244 * 1245 * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst 1246 * has event object of operation type != RTE_EVENT_OP_NEW. 1247 * 1248 * @param dev_id 1249 * The identifier of the device. 1250 * @param port_id 1251 * The identifier of the event port. 1252 * @param ev 1253 * Points to an array of *nb_events* objects of type *rte_event* structure 1254 * which contain the event object enqueue operations to be processed. 1255 * @param nb_events 1256 * The number of event objects to enqueue, typically number of 1257 * rte_event_port_enqueue_depth() available for this port. 1258 * 1259 * @return 1260 * The number of event objects actually enqueued on the event device. The 1261 * return value can be less than the value of the *nb_events* parameter when 1262 * the event devices queue is full or if invalid parameters are specified in a 1263 * *rte_event*. If the return value is less than *nb_events*, the remaining 1264 * events at the end of ev[] are not consumed and the caller has to take care 1265 * of them, and rte_errno is set accordingly. Possible errno values include: 1266 * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue 1267 * ID is invalid, or an event's sched type doesn't match the 1268 * capabilities of the destination queue. 1269 * - -ENOSPC The event port was backpressured and unable to enqueue 1270 * one or more events. This error code is only applicable to 1271 * closed systems. 1272 * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst() 1273 */ 1274 static inline uint16_t 1275 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id, 1276 const struct rte_event ev[], uint16_t nb_events) 1277 { 1278 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1279 1280 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events, 1281 dev->enqueue_new_burst); 1282 } 1283 1284 /** 1285 * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD* 1286 * on an event device designated by its *dev_id* through the event port 1287 * specified by *port_id*. 1288 * 1289 * Provides the same functionality as rte_event_enqueue_burst(), expect that 1290 * application can use this API when the all objects in the burst contains 1291 * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized 1292 * function can provide the additional hint to the PMD and optimize if possible. 1293 * 1294 * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst 1295 * has event object of operation type != RTE_EVENT_OP_FORWARD. 1296 * 1297 * @param dev_id 1298 * The identifier of the device. 1299 * @param port_id 1300 * The identifier of the event port. 1301 * @param ev 1302 * Points to an array of *nb_events* objects of type *rte_event* structure 1303 * which contain the event object enqueue operations to be processed. 1304 * @param nb_events 1305 * The number of event objects to enqueue, typically number of 1306 * rte_event_port_enqueue_depth() available for this port. 1307 * 1308 * @return 1309 * The number of event objects actually enqueued on the event device. The 1310 * return value can be less than the value of the *nb_events* parameter when 1311 * the event devices queue is full or if invalid parameters are specified in a 1312 * *rte_event*. If the return value is less than *nb_events*, the remaining 1313 * events at the end of ev[] are not consumed and the caller has to take care 1314 * of them, and rte_errno is set accordingly. Possible errno values include: 1315 * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue 1316 * ID is invalid, or an event's sched type doesn't match the 1317 * capabilities of the destination queue. 1318 * - -ENOSPC The event port was backpressured and unable to enqueue 1319 * one or more events. This error code is only applicable to 1320 * closed systems. 1321 * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst() 1322 */ 1323 static inline uint16_t 1324 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id, 1325 const struct rte_event ev[], uint16_t nb_events) 1326 { 1327 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1328 1329 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events, 1330 dev->enqueue_forward_burst); 1331 } 1332 1333 /** 1334 * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst() 1335 * 1336 * If the device is configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag 1337 * then application can use this function to convert timeout value in 1338 * nanoseconds to implementations specific timeout value supplied in 1339 * rte_event_dequeue_burst() 1340 * 1341 * @param dev_id 1342 * The identifier of the device. 1343 * @param ns 1344 * Wait time in nanosecond 1345 * @param[out] timeout_ticks 1346 * Value for the *timeout_ticks* parameter in rte_event_dequeue_burst() 1347 * 1348 * @return 1349 * - 0 on success. 1350 * - -ENOTSUP if the device doesn't support timeouts 1351 * - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL 1352 * - other values < 0 on failure. 1353 * 1354 * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT 1355 * @see rte_event_dev_configure() 1356 * 1357 */ 1358 int 1359 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns, 1360 uint64_t *timeout_ticks); 1361 1362 /** 1363 * Dequeue a burst of events objects or an event object from the event port 1364 * designated by its *event_port_id*, on an event device designated 1365 * by its *dev_id*. 1366 * 1367 * rte_event_dequeue_burst() does not dictate the specifics of scheduling 1368 * algorithm as each eventdev driver may have different criteria to schedule 1369 * an event. However, in general, from an application perspective scheduler may 1370 * use the following scheme to dispatch an event to the port. 1371 * 1372 * 1) Selection of event queue based on 1373 * a) The list of event queues are linked to the event port. 1374 * b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event 1375 * queue selection from list is based on event queue priority relative to 1376 * other event queue supplied as *priority* in rte_event_queue_setup() 1377 * c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event 1378 * queue selection from the list is based on event priority supplied as 1379 * *priority* in rte_event_enqueue_burst() 1380 * 2) Selection of event 1381 * a) The number of flows available in selected event queue. 1382 * b) Schedule type method associated with the event 1383 * 1384 * The *nb_events* parameter is the maximum number of event objects to dequeue 1385 * which are returned in the *ev* array of *rte_event* structure. 1386 * 1387 * The rte_event_dequeue_burst() function returns the number of events objects 1388 * it actually dequeued. A return value equal to *nb_events* means that all 1389 * event objects have been dequeued. 1390 * 1391 * The number of events dequeued is the number of scheduler contexts held by 1392 * this port. These contexts are automatically released in the next 1393 * rte_event_dequeue_burst() invocation, or invoking rte_event_enqueue_burst() 1394 * with RTE_EVENT_OP_RELEASE operation can be used to release the 1395 * contexts early. 1396 * 1397 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be 1398 * enqueued to the same port that their associated events were dequeued from. 1399 * 1400 * @param dev_id 1401 * The identifier of the device. 1402 * @param port_id 1403 * The identifier of the event port. 1404 * @param[out] ev 1405 * Points to an array of *nb_events* objects of type *rte_event* structure 1406 * for output to be populated with the dequeued event objects. 1407 * @param nb_events 1408 * The maximum number of event objects to dequeue, typically number of 1409 * rte_event_port_dequeue_depth() available for this port. 1410 * 1411 * @param timeout_ticks 1412 * - 0 no-wait, returns immediately if there is no event. 1413 * - >0 wait for the event, if the device is configured with 1414 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until 1415 * at least one event is available or *timeout_ticks* time. 1416 * if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT 1417 * then this function will wait until the event available or 1418 * *dequeue_timeout_ns* ns which was previously supplied to 1419 * rte_event_dev_configure() 1420 * 1421 * @return 1422 * The number of event objects actually dequeued from the port. The return 1423 * value can be less than the value of the *nb_events* parameter when the 1424 * event port's queue is not full. 1425 * 1426 * @see rte_event_port_dequeue_depth() 1427 */ 1428 static inline uint16_t 1429 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[], 1430 uint16_t nb_events, uint64_t timeout_ticks) 1431 { 1432 struct rte_eventdev *dev = &rte_eventdevs[dev_id]; 1433 1434 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG 1435 if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) { 1436 rte_errno = -EINVAL; 1437 return 0; 1438 } 1439 1440 if (port_id >= dev->data->nb_ports) { 1441 rte_errno = -EINVAL; 1442 return 0; 1443 } 1444 #endif 1445 1446 /* 1447 * Allow zero cost non burst mode routine invocation if application 1448 * requests nb_events as const one 1449 */ 1450 if (nb_events == 1) 1451 return (*dev->dequeue)( 1452 dev->data->ports[port_id], ev, timeout_ticks); 1453 else 1454 return (*dev->dequeue_burst)( 1455 dev->data->ports[port_id], ev, nb_events, 1456 timeout_ticks); 1457 } 1458 1459 /** 1460 * Link multiple source event queues supplied in *queues* to the destination 1461 * event port designated by its *port_id* with associated service priority 1462 * supplied in *priorities* on the event device designated by its *dev_id*. 1463 * 1464 * The link establishment shall enable the event port *port_id* from 1465 * receiving events from the specified event queue(s) supplied in *queues* 1466 * 1467 * An event queue may link to one or more event ports. 1468 * The number of links can be established from an event queue to event port is 1469 * implementation defined. 1470 * 1471 * Event queue(s) to event port link establishment can be changed at runtime 1472 * without re-configuring the device to support scaling and to reduce the 1473 * latency of critical work by establishing the link with more event ports 1474 * at runtime. 1475 * 1476 * @param dev_id 1477 * The identifier of the device. 1478 * 1479 * @param port_id 1480 * Event port identifier to select the destination port to link. 1481 * 1482 * @param queues 1483 * Points to an array of *nb_links* event queues to be linked 1484 * to the event port. 1485 * NULL value is allowed, in which case this function links all the configured 1486 * event queues *nb_event_queues* which previously supplied to 1487 * rte_event_dev_configure() to the event port *port_id* 1488 * 1489 * @param priorities 1490 * Points to an array of *nb_links* service priorities associated with each 1491 * event queue link to event port. 1492 * The priority defines the event port's servicing priority for 1493 * event queue, which may be ignored by an implementation. 1494 * The requested priority should in the range of 1495 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST]. 1496 * The implementation shall normalize the requested priority to 1497 * implementation supported priority value. 1498 * NULL value is allowed, in which case this function links the event queues 1499 * with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority 1500 * 1501 * @param nb_links 1502 * The number of links to establish. This parameter is ignored if queues is 1503 * NULL. 1504 * 1505 * @return 1506 * The number of links actually established. The return value can be less than 1507 * the value of the *nb_links* parameter when the implementation has the 1508 * limitation on specific queue to port link establishment or if invalid 1509 * parameters are specified in *queues* 1510 * If the return value is less than *nb_links*, the remaining links at the end 1511 * of link[] are not established, and the caller has to take care of them. 1512 * If return value is less than *nb_links* then implementation shall update the 1513 * rte_errno accordingly, Possible rte_errno values are 1514 * (-EDQUOT) Quota exceeded(Application tried to link the queue configured with 1515 * RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event ports) 1516 * (-EINVAL) Invalid parameter 1517 * 1518 */ 1519 int 1520 rte_event_port_link(uint8_t dev_id, uint8_t port_id, 1521 const uint8_t queues[], const uint8_t priorities[], 1522 uint16_t nb_links); 1523 1524 /** 1525 * Unlink multiple source event queues supplied in *queues* from the destination 1526 * event port designated by its *port_id* on the event device designated 1527 * by its *dev_id*. 1528 * 1529 * The unlink establishment shall disable the event port *port_id* from 1530 * receiving events from the specified event queue *queue_id* 1531 * 1532 * Event queue(s) to event port unlink establishment can be changed at runtime 1533 * without re-configuring the device. 1534 * 1535 * @param dev_id 1536 * The identifier of the device. 1537 * 1538 * @param port_id 1539 * Event port identifier to select the destination port to unlink. 1540 * 1541 * @param queues 1542 * Points to an array of *nb_unlinks* event queues to be unlinked 1543 * from the event port. 1544 * NULL value is allowed, in which case this function unlinks all the 1545 * event queue(s) from the event port *port_id*. 1546 * 1547 * @param nb_unlinks 1548 * The number of unlinks to establish. This parameter is ignored if queues is 1549 * NULL. 1550 * 1551 * @return 1552 * The number of unlinks actually established. The return value can be less 1553 * than the value of the *nb_unlinks* parameter when the implementation has the 1554 * limitation on specific queue to port unlink establishment or 1555 * if invalid parameters are specified. 1556 * If the return value is less than *nb_unlinks*, the remaining queues at the 1557 * end of queues[] are not established, and the caller has to take care of them. 1558 * If return value is less than *nb_unlinks* then implementation shall update 1559 * the rte_errno accordingly, Possible rte_errno values are 1560 * (-EINVAL) Invalid parameter 1561 * 1562 */ 1563 int 1564 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, 1565 uint8_t queues[], uint16_t nb_unlinks); 1566 1567 /** 1568 * Retrieve the list of source event queues and its associated service priority 1569 * linked to the destination event port designated by its *port_id* 1570 * on the event device designated by its *dev_id*. 1571 * 1572 * @param dev_id 1573 * The identifier of the device. 1574 * 1575 * @param port_id 1576 * Event port identifier. 1577 * 1578 * @param[out] queues 1579 * Points to an array of *queues* for output. 1580 * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to 1581 * store the event queue(s) linked with event port *port_id* 1582 * 1583 * @param[out] priorities 1584 * Points to an array of *priorities* for output. 1585 * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to 1586 * store the service priority associated with each event queue linked 1587 * 1588 * @return 1589 * The number of links established on the event port designated by its 1590 * *port_id*. 1591 * - <0 on failure. 1592 * 1593 */ 1594 int 1595 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, 1596 uint8_t queues[], uint8_t priorities[]); 1597 1598 /** 1599 * Retrieve the service ID of the event dev. If the adapter doesn't use 1600 * a rte_service function, this function returns -ESRCH. 1601 * 1602 * @param dev_id 1603 * The identifier of the device. 1604 * 1605 * @param [out] service_id 1606 * A pointer to a uint32_t, to be filled in with the service id. 1607 * 1608 * @return 1609 * - 0: Success 1610 * - <0: Error code on failure, if the event dev doesn't use a rte_service 1611 * function, this function returns -ESRCH. 1612 */ 1613 int 1614 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id); 1615 1616 /** 1617 * Dump internal information about *dev_id* to the FILE* provided in *f*. 1618 * 1619 * @param dev_id 1620 * The identifier of the device. 1621 * 1622 * @param f 1623 * A pointer to a file for output 1624 * 1625 * @return 1626 * - 0: on success 1627 * - <0: on failure. 1628 */ 1629 int 1630 rte_event_dev_dump(uint8_t dev_id, FILE *f); 1631 1632 /** Maximum name length for extended statistics counters */ 1633 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64 1634 1635 /** 1636 * Selects the component of the eventdev to retrieve statistics from. 1637 */ 1638 enum rte_event_dev_xstats_mode { 1639 RTE_EVENT_DEV_XSTATS_DEVICE, 1640 RTE_EVENT_DEV_XSTATS_PORT, 1641 RTE_EVENT_DEV_XSTATS_QUEUE, 1642 }; 1643 1644 /** 1645 * A name-key lookup element for extended statistics. 1646 * 1647 * This structure is used to map between names and ID numbers 1648 * for extended ethdev statistics. 1649 */ 1650 struct rte_event_dev_xstats_name { 1651 char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE]; 1652 }; 1653 1654 /** 1655 * Retrieve names of extended statistics of an event device. 1656 * 1657 * @param dev_id 1658 * The identifier of the event device. 1659 * @param mode 1660 * The mode of statistics to retrieve. Choices include the device statistics, 1661 * port statistics or queue statistics. 1662 * @param queue_port_id 1663 * Used to specify the port or queue number in queue or port mode, and is 1664 * ignored in device mode. 1665 * @param[out] xstats_names 1666 * Block of memory to insert names into. Must be at least size in capacity. 1667 * If set to NULL, function returns required capacity. 1668 * @param[out] ids 1669 * Block of memory to insert ids into. Must be at least size in capacity. 1670 * If set to NULL, function returns required capacity. The id values returned 1671 * can be passed to *rte_event_dev_xstats_get* to select statistics. 1672 * @param size 1673 * Capacity of xstats_names (number of names). 1674 * @return 1675 * - positive value lower or equal to size: success. The return value 1676 * is the number of entries filled in the stats table. 1677 * - positive value higher than size: error, the given statistics table 1678 * is too small. The return value corresponds to the size that should 1679 * be given to succeed. The entries in the table are not valid and 1680 * shall not be used by the caller. 1681 * - negative value on error: 1682 * -ENODEV for invalid *dev_id* 1683 * -EINVAL for invalid mode, queue port or id parameters 1684 * -ENOTSUP if the device doesn't support this function. 1685 */ 1686 int 1687 rte_event_dev_xstats_names_get(uint8_t dev_id, 1688 enum rte_event_dev_xstats_mode mode, 1689 uint8_t queue_port_id, 1690 struct rte_event_dev_xstats_name *xstats_names, 1691 unsigned int *ids, 1692 unsigned int size); 1693 1694 /** 1695 * Retrieve extended statistics of an event device. 1696 * 1697 * @param dev_id 1698 * The identifier of the device. 1699 * @param mode 1700 * The mode of statistics to retrieve. Choices include the device statistics, 1701 * port statistics or queue statistics. 1702 * @param queue_port_id 1703 * Used to specify the port or queue number in queue or port mode, and is 1704 * ignored in device mode. 1705 * @param ids 1706 * The id numbers of the stats to get. The ids can be got from the stat 1707 * position in the stat list from rte_event_dev_get_xstats_names(), or 1708 * by using rte_eventdev_get_xstats_by_name() 1709 * @param[out] values 1710 * The values for each stats request by ID. 1711 * @param n 1712 * The number of stats requested 1713 * @return 1714 * - positive value: number of stat entries filled into the values array 1715 * - negative value on error: 1716 * -ENODEV for invalid *dev_id* 1717 * -EINVAL for invalid mode, queue port or id parameters 1718 * -ENOTSUP if the device doesn't support this function. 1719 */ 1720 int 1721 rte_event_dev_xstats_get(uint8_t dev_id, 1722 enum rte_event_dev_xstats_mode mode, 1723 uint8_t queue_port_id, 1724 const unsigned int ids[], 1725 uint64_t values[], unsigned int n); 1726 1727 /** 1728 * Retrieve the value of a single stat by requesting it by name. 1729 * 1730 * @param dev_id 1731 * The identifier of the device 1732 * @param name 1733 * The stat name to retrieve 1734 * @param[out] id 1735 * If non-NULL, the numerical id of the stat will be returned, so that further 1736 * requests for the stat can be got using rte_eventdev_xstats_get, which will 1737 * be faster as it doesn't need to scan a list of names for the stat. 1738 * If the stat cannot be found, the id returned will be (unsigned)-1. 1739 * @return 1740 * - positive value or zero: the stat value 1741 * - negative value: -EINVAL if stat not found, -ENOTSUP if not supported. 1742 */ 1743 uint64_t 1744 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name, 1745 unsigned int *id); 1746 1747 /** 1748 * Reset the values of the xstats of the selected component in the device. 1749 * 1750 * @param dev_id 1751 * The identifier of the device 1752 * @param mode 1753 * The mode of the statistics to reset. Choose from device, queue or port. 1754 * @param queue_port_id 1755 * The queue or port to reset. 0 and positive values select ports and queues, 1756 * while -1 indicates all ports or queues. 1757 * @param ids 1758 * Selects specific statistics to be reset. When NULL, all statistics selected 1759 * by *mode* will be reset. If non-NULL, must point to array of at least 1760 * *nb_ids* size. 1761 * @param nb_ids 1762 * The number of ids available from the *ids* array. Ignored when ids is NULL. 1763 * @return 1764 * - zero: successfully reset the statistics to zero 1765 * - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported. 1766 */ 1767 int 1768 rte_event_dev_xstats_reset(uint8_t dev_id, 1769 enum rte_event_dev_xstats_mode mode, 1770 int16_t queue_port_id, 1771 const uint32_t ids[], 1772 uint32_t nb_ids); 1773 1774 #ifdef __cplusplus 1775 } 1776 #endif 1777 1778 #endif /* _RTE_EVENTDEV_H_ */ 1779