Lines matching refs: dev_id
351 rte_event_dev_socket_id(uint8_t dev_id);
424 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
452 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
543 rte_event_dev_configure(uint8_t dev_id,
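
The device-level query and configure entries above are normally used together at init time. A minimal sketch, assuming dev_id refers to an event device already probed by EAL; configure_eventdev() and the resource counts chosen here are illustrative, not required values:

    #include <rte_common.h>
    #include <rte_eventdev.h>

    /* Query the device limits, then configure a small subset of them.
     * The socket id would typically be used for NUMA-local allocations. */
    static int
    configure_eventdev(uint8_t dev_id)
    {
        struct rte_event_dev_info info;
        struct rte_event_dev_config cfg = {0};
        int socket_id = rte_event_dev_socket_id(dev_id);
        int ret;

        ret = rte_event_dev_info_get(dev_id, &info);
        if (ret < 0)
            return ret;

        cfg.nb_event_queues = RTE_MIN(2, info.max_event_queues);
        cfg.nb_event_ports = RTE_MIN(2, info.max_event_ports);
        cfg.nb_events_limit = info.max_num_events;
        cfg.nb_event_queue_flows = info.max_event_queue_flows;
        cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
        cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
        cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

        RTE_SET_USED(socket_id); /* would be passed to mempool/ring creation */
        return rte_event_dev_configure(dev_id, &cfg);
    }
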
625 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
647 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
692 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
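
A sketch of the queue-setup pattern implied by the three calls above: start from the driver defaults, override what the application cares about, then optionally read an attribute back. setup_atomic_queue() is an illustrative name, and queue_id is assumed to be below the nb_event_queues given to rte_event_dev_configure():

    #include <rte_eventdev.h>

    static int
    setup_atomic_queue(uint8_t dev_id, uint8_t queue_id)
    {
        struct rte_event_queue_conf qconf;
        uint32_t sched;
        int ret;

        ret = rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
        if (ret < 0)
            return ret;

        qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
        qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

        ret = rte_event_queue_setup(dev_id, queue_id, &qconf);
        if (ret < 0)
            return ret;

        /* Read back what the driver actually programmed. */
        return rte_event_queue_attr_get(dev_id, queue_id,
                                        RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
                                        &sched);
    }
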
764 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
788 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
825 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
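
The same default-then-setup shape applies to ports. In this sketch (setup_worker_port() is an illustrative name) the driver defaults are kept unchanged and one attribute is read back afterwards:

    #include <rte_eventdev.h>

    static int
    setup_worker_port(uint8_t dev_id, uint8_t port_id)
    {
        struct rte_event_port_conf pconf;
        uint32_t depth;
        int ret;

        ret = rte_event_port_default_conf_get(dev_id, port_id, &pconf);
        if (ret < 0)
            return ret;

        /* The defaults already respect the limits reported by
         * rte_event_dev_info_get(); adjust the depths here if needed. */
        ret = rte_event_port_setup(dev_id, port_id, &pconf);
        if (ret < 0)
            return ret;

        return rte_event_port_attr_get(dev_id, port_id,
                                       RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &depth);
    }
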
845 rte_event_dev_start(uint8_t dev_id);
866 rte_event_dev_stop(uint8_t dev_id);
868 typedef void (*eventdev_stop_flush_t)(uint8_t dev_id, struct rte_event event,
899 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
914 rte_event_dev_close(uint8_t dev_id);
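
These five entries cover the runtime lifecycle. A sketch of how they fit together, assuming queues and ports were set up beforehand; count_flushed() and the counter argument are illustrative, only the eventdev calls come from the header. The flush callback is invoked once per event still buffered in the device when rte_event_dev_stop() runs:

    #include <rte_common.h>
    #include <rte_eventdev.h>

    static void
    count_flushed(uint8_t dev_id, struct rte_event ev, void *user_arg)
    {
        RTE_SET_USED(dev_id);
        RTE_SET_USED(ev);
        (*(uint64_t *)user_arg)++;
    }

    static int
    run_and_teardown(uint8_t dev_id, uint64_t *flushed)
    {
        int ret;

        ret = rte_event_dev_stop_flush_callback_register(dev_id,
                                                         count_flushed,
                                                         flushed);
        if (ret < 0)
            return ret;

        ret = rte_event_dev_start(dev_id);
        if (ret < 0)
            return ret;

        /* ... enqueue/dequeue on the configured ports ... */

        rte_event_dev_stop(dev_id);      /* flush callback runs here */
        return rte_event_dev_close(dev_id);
    }
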
1151 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1171 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1219 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1245 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
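
The four *_caps_get() prototypes above let an application discover, per adapter type, whether the event device can move events itself or needs a service core. A sketch for the Rx adapter case; rx_adapter_needs_service_core() is an illustrative name and eth_port_id is assumed to be a valid, already-probed ethdev port:

    #include <rte_eventdev.h>

    /* Returns 1 if packets must be injected via a service core,
     * 0 if the device has an internal port to the ethdev. */
    static int
    rx_adapter_needs_service_core(uint8_t dev_id, uint16_t eth_port_id)
    {
        uint32_t caps = 0;
        int ret;

        ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps);
        if (ret < 0)
            return ret;

        return !(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
    }
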
1289 uint8_t dev_id; member
1366 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, in __rte_event_enqueue_burst() argument
1370 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; in __rte_event_enqueue_burst()
1373 if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) { in __rte_event_enqueue_burst()
1383 rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn); in __rte_event_enqueue_burst()
1438 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, in rte_event_enqueue_burst() argument
1441 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; in rte_event_enqueue_burst()
1443 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events, in rte_event_enqueue_burst()
1489 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id, in rte_event_enqueue_new_burst() argument
1492 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; in rte_event_enqueue_new_burst()
1494 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events, in rte_event_enqueue_new_burst()
1540 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id, in rte_event_enqueue_forward_burst() argument
1543 const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; in rte_event_enqueue_forward_burst()
1545 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events, in rte_event_enqueue_forward_burst()
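
The entries above show that the three public enqueue variants (rte_event_enqueue_burst, _new_burst, _forward_burst) all funnel into __rte_event_enqueue_burst(). A usage sketch for the OP_NEW variant; enqueue_all_new() is illustrative and ev[] is assumed to be fully populated (queue_id, sched_type, event type, payload) by the caller:

    #include <rte_eventdev.h>

    /* Inject nb new events through an event port, retrying until the device
     * accepts the whole burst. A production path would also consult rte_errno
     * on a short return to tell back-pressure from invalid events. */
    static void
    enqueue_all_new(uint8_t dev_id, uint8_t port_id,
                    struct rte_event *ev, uint16_t nb)
    {
        uint16_t sent = 0;

        while (sent < nb)
            sent += rte_event_enqueue_new_burst(dev_id, port_id,
                                                &ev[sent], nb - sent);
    }
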
1575 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1645 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[], in rte_event_dequeue_burst() argument
1648 struct rte_eventdev *dev = &rte_eventdevs[dev_id]; in rte_event_dequeue_burst()
1651 if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) { in rte_event_dequeue_burst()
1661 rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events); in rte_event_dequeue_burst()
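
A dequeue-side sketch tying the two preceding entries together; poll_port() and the 100 us / 32-event figures are illustrative. The per-call timeout is only honored when the device was configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT; otherwise the global dequeue_timeout_ns applies and the argument is ignored:

    #include <rte_common.h>
    #include <rte_eventdev.h>

    static void
    poll_port(uint8_t dev_id, uint8_t port_id)
    {
        struct rte_event ev[32];
        uint64_t timeout = 0;
        uint16_t i, nb;

        /* Not every driver implements the conversion; fall back to no-wait. */
        if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &timeout) < 0)
            timeout = 0;

        nb = rte_event_dequeue_burst(dev_id, port_id, ev, RTE_DIM(ev),
                                     timeout);
        for (i = 0; i < nb; i++) {
            /* ... process ev[i], then forward or release it ... */
        }
    }
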
1736 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1780 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1805 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
1835 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
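
A linking sketch for the four entries above; link_worker_port() is an illustrative name and queue ids 0 and 1 are assumed to have been set up already. The unlink side follows the same shape, with rte_event_port_unlinks_in_progress() available to poll until pending unlinks complete:

    #include <rte_common.h>
    #include <rte_eventdev.h>

    static int
    link_worker_port(uint8_t dev_id, uint8_t port_id)
    {
        const uint8_t queues[] = { 0, 1 };
        const uint8_t prios[] = { RTE_EVENT_DEV_PRIORITY_HIGHEST,
                                  RTE_EVENT_DEV_PRIORITY_NORMAL };
        uint8_t linked_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
        uint8_t linked_prios[RTE_EVENT_MAX_QUEUES_PER_DEV];
        int nb;

        nb = rte_event_port_link(dev_id, port_id, queues, prios,
                                 RTE_DIM(queues));
        if (nb != (int)RTE_DIM(queues))
            return -1;       /* some links were rejected, rte_errno says why */

        /* Returns the number of links established on this port. */
        return rte_event_port_links_get(dev_id, port_id,
                                        linked_queues, linked_prios);
    }
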
1854 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
1870 rte_event_dev_dump(uint8_t dev_id, FILE *f);
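
A sketch combining the two entries above: if the device's scheduler runs as a DPDK service, map it to a service lcore, then dump the device state for debugging. attach_scheduler_service() is an illustrative name and lcore_id is assumed to already be a started service lcore:

    #include <errno.h>
    #include <stdio.h>
    #include <rte_eventdev.h>
    #include <rte_service.h>

    static int
    attach_scheduler_service(uint8_t dev_id, uint32_t lcore_id)
    {
        uint32_t service_id;
        int ret;

        ret = rte_event_dev_service_id_get(dev_id, &service_id);
        if (ret == -ESRCH)
            return 0;        /* device schedules internally, no service core */
        if (ret < 0)
            return ret;

        ret = rte_service_map_lcore_set(service_id, lcore_id, 1);
        if (ret < 0)
            return ret;

        return rte_event_dev_dump(dev_id, stdout);
    }
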
1927 rte_event_dev_xstats_names_get(uint8_t dev_id,
1961 rte_event_dev_xstats_get(uint8_t dev_id,
1984 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
2008 rte_event_dev_xstats_reset(uint8_t dev_id,
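
A sketch of the extended-statistics calls above, enumerating the device-scope stats, printing them and resetting them. dump_and_reset_dev_xstats() is illustrative, and the count-then-fetch pattern (a first call with size 0 to learn how many stats exist) is an assumption about typical usage; rte_event_dev_xstats_by_name_get() is the single-stat, by-name alternative:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_eventdev.h>

    static int
    dump_and_reset_dev_xstats(uint8_t dev_id)
    {
        const enum rte_event_dev_xstats_mode mode =
            RTE_EVENT_DEV_XSTATS_DEVICE;
        struct rte_event_dev_xstats_name *names = NULL;
        unsigned int *ids = NULL;
        uint64_t *values = NULL;
        int i, n, ret = -1;

        /* A first call with size 0 just reports how many stats exist. */
        n = rte_event_dev_xstats_names_get(dev_id, mode, 0, NULL, NULL, 0);
        if (n <= 0)
            return n;

        names = calloc(n, sizeof(*names));
        ids = calloc(n, sizeof(*ids));
        values = calloc(n, sizeof(*values));
        if (names == NULL || ids == NULL || values == NULL)
            goto out;

        if (rte_event_dev_xstats_names_get(dev_id, mode, 0,
                                           names, ids, n) != n)
            goto out;
        if (rte_event_dev_xstats_get(dev_id, mode, 0, ids, values, n) != n)
            goto out;

        for (i = 0; i < n; i++)
            printf("%s: %" PRIu64 "\n", names[i].name, values[i]);

        /* A NULL id list resets every statistic selected by the mode. */
        rte_event_dev_xstats_reset(dev_id, mode, 0, NULL, 0);
        ret = 0;
    out:
        free(names);
        free(ids);
        free(values);
        return ret;
    }
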
2024 int rte_event_dev_selftest(uint8_t dev_id);