/dpdk/app/test-eventdev/

test_pipeline_queue.c
    64: ev.queue_id = tx_queue[ev.mbuf->port];   in pipeline_queue_worker_single_stage_fwd()
    125: ev[i].queue_id = tx_queue[ev[i].mbuf->port];   in pipeline_queue_worker_single_stage_burst_fwd()
    185: ev.queue_id = tx_queue[ev.vec->port];   in pipeline_queue_worker_single_stage_fwd_vector()
    250: ev[i].queue_id = tx_queue[ev[i].vec->port];   in pipeline_queue_worker_single_stage_burst_fwd_vector()
    282: if (ev.queue_id == tx_queue[ev.mbuf->port]) {   in pipeline_queue_worker_multi_stage_tx()
    318: ev.queue_id = tx_queue[ev.mbuf->port];   in pipeline_queue_worker_multi_stage_fwd()
    353: if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) {   in pipeline_queue_worker_multi_stage_burst_tx()
    393: ev[i].queue_id = tx_queue[ev[i].mbuf->port];   in pipeline_queue_worker_multi_stage_burst_fwd()
    431: if (ev.queue_id == tx_queue[ev.vec->port]) {   in pipeline_queue_worker_multi_stage_tx_vector()
    470: ev.queue_id = tx_queue[ev.vec->port];   in pipeline_queue_worker_multi_stage_fwd_vector()
    [all …]
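
The idiom repeated through these matches is the "fwd" worker: dequeue an event, point it at the event queue that the TX adapter services for the packet's destination port, and hand it back with RTE_EVENT_OP_FORWARD. A minimal sketch, assuming hypothetical dev_id/port_id for the worker's event port, a tx_queue[] table mapping ethdev port id to TX-adapter event queue, and a test-owned termination flag:

```c
#include <stdbool.h>
#include <rte_eventdev.h>
#include <rte_mbuf.h>
#include <rte_pause.h>

/* Single-stage "fwd" worker sketch; names outside struct rte_event and
 * the enqueue/dequeue APIs are assumptions, not the test's own code. */
static void
single_stage_fwd_worker(uint8_t dev_id, uint8_t port_id,
			const uint8_t tx_queue[], volatile bool *done)
{
	struct rte_event ev;

	while (!*done) {
		if (!rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0))
			continue;

		/* Retarget the event at the TX queue for the packet's
		 * destination port and hand it back to the scheduler. */
		ev.queue_id = tx_queue[ev.mbuf->port];
		ev.op = RTE_EVENT_OP_FORWARD;
		while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
			rte_pause();
	}
}
```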

test_pipeline_atq.c
    57: ev.queue_id = tx_queue[ev.mbuf->port];   in pipeline_atq_worker_single_stage_fwd()
    112: ev[i].queue_id = tx_queue[ev[i].mbuf->port];   in pipeline_atq_worker_single_stage_burst_fwd()
    164: ev.queue_id = tx_queue[ev.vec->port];   in pipeline_atq_worker_single_stage_fwd_vector()
    165: ev.vec->queue = 0;   in pipeline_atq_worker_single_stage_fwd_vector()
    221: ev[i].queue_id = tx_queue[ev[i].vec->port];   in pipeline_atq_worker_single_stage_burst_fwd_vector()
    258: ev.sub_event_type++;   in pipeline_atq_worker_multi_stage_tx()
    285: ev.queue_id = tx_queue[ev.mbuf->port];   in pipeline_atq_worker_multi_stage_fwd()
    357: ev[i].queue_id = tx_queue[ev[i].mbuf->port];   in pipeline_atq_worker_multi_stage_burst_fwd()
    426: ev.queue_id = tx_queue[ev.vec->port];   in pipeline_atq_worker_multi_stage_fwd_vector()
    427: ev.vec->queue = 0;   in pipeline_atq_worker_multi_stage_fwd_vector()
    [all …]
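
Line 258 is what distinguishes the ATQ (all-types queue) variant: stages share one queue, so the pipeline advances by incrementing sub_event_type, and only the final stage retargets queue_id at the TX adapter. A hedged sketch of that stage step; the helper name and arguments are hypothetical:

```c
#include <rte_eventdev.h>
#include <rte_mbuf.h>

/* Hypothetical helper: advance an event one pipeline stage on an
 * all-types queue. The stage index lives in sub_event_type, and the
 * last stage diverts the event to the TX-adapter queue instead. */
static inline void
atq_stage_advance(struct rte_event *ev, uint8_t last_stage,
		  const uint8_t tx_queue[])
{
	if (ev->sub_event_type == last_stage)
		ev->queue_id = tx_queue[ev->mbuf->port]; /* hand off to TX */
	else
		ev->sub_event_type++;                    /* next stage */
	ev->op = RTE_EVENT_OP_FORWARD;
}
```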

test_perf_atq.c
    31: ev->sub_event_type++;   in atq_fwd_event()
    32: ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];   in atq_fwd_event()
    33: ev->op = RTE_EVENT_OP_FORWARD;   in atq_fwd_event()
    41: struct rte_event ev;   in perf_atq_worker() (local)
    70: atq_mark_fwd_latency(&ev);   in perf_atq_worker()
    76: &ev, w, bufs, sz, cnt);   in perf_atq_worker()
    119: ev[i].event_ptr =   in perf_atq_worker_burst()
    122: ev[i].event_ptr =   in perf_atq_worker_burst()
    136: atq_mark_fwd_latency(&ev[i]);   in perf_atq_worker_burst()
    146: &ev[i], w, bufs, sz, cnt);   in perf_atq_worker_burst()
    [all …]

test_order_atq.c
    16: ev->op = RTE_EVENT_OP_FORWARD;   in order_atq_process_stage_0()
    17: ev->sched_type = RTE_SCHED_TYPE_ATOMIC;   in order_atq_process_stage_0()
    18: ev->event_type = RTE_EVENT_TYPE_CPU;   in order_atq_process_stage_0()
    25: struct rte_event ev;   in order_atq_worker() (local)
    29: &ev, 1, 0);   in order_atq_worker()
    41: order_atq_process_stage_0(&ev);   in order_atq_worker()
    49: order_process_stage_invalid(t, &ev);   in order_atq_worker()
    59: struct rte_event ev[BURST_SIZE];   in order_atq_worker_burst() (local)
    78: order_atq_process_stage_0(&ev[i]);   in order_atq_worker_burst()
    82: ev[i].op = RTE_EVENT_OP_RELEASE;   in order_atq_worker_burst()
    [all …]
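
The order test's worker has exactly two outcomes per event, visible at lines 16-18 and 82: stage-0 events are re-injected as atomic CPU events, and finished events are released. A sketch of both, with the helper names shortened from the test's own:

```c
#include <rte_eventdev.h>

/* Stage 0: forward the event as an atomic CPU event (lines 16-18). */
static inline void
order_stage_0(struct rte_event *ev)
{
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

/* Done: release the scheduling context without re-enqueuing (line 82). */
static inline void
order_done(struct rte_event *ev)
{
	ev->op = RTE_EVENT_OP_RELEASE;
}
```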

test_order_queue.c
    15: ev->queue_id = 1; /* q1 atomic queue */   in order_queue_process_stage_0()
    16: ev->op = RTE_EVENT_OP_FORWARD;   in order_queue_process_stage_0()
    17: ev->sched_type = RTE_SCHED_TYPE_ATOMIC;   in order_queue_process_stage_0()
    18: ev->event_type = RTE_EVENT_TYPE_CPU;   in order_queue_process_stage_0()
    25: struct rte_event ev;   in order_queue_worker() (local)
    29: &ev, 1, 0);   in order_queue_worker()
    41: order_queue_process_stage_0(&ev);   in order_queue_worker()
    49: order_process_stage_invalid(t, &ev);   in order_queue_worker()
    59: struct rte_event ev[BURST_SIZE];   in order_queue_worker_burst() (local)
    83: ev[i].op = RTE_EVENT_OP_RELEASE;   in order_queue_worker_burst()
    [all …]

test_perf_queue.c
    33: ev->queue_id++;   in fwd_event()
    34: ev->sched_type = sched_type_list[ev->queue_id % nb_stages];   in fwd_event()
    35: ev->op = RTE_EVENT_OP_FORWARD;   in fwd_event()
    43: struct rte_event ev;   in perf_queue_worker() (local)
    60: ev.event_ptr = op->sym->m_src;   in perf_queue_worker()
    78: &ev, w, bufs, sz, cnt);   in perf_queue_worker()
    81: &ev, w, bufs, sz, cnt);   in perf_queue_worker()
    121: ev[i].event_ptr =   in perf_queue_worker_burst()
    124: ev[i].event_ptr =   in perf_queue_worker_burst()
    148: &ev[i], w, bufs, sz, cnt);   in perf_queue_worker_burst()
    [all …]
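
Lines 33-35 are the queue-based counterpart of the ATQ stage step: each stage is its own event queue, so forwarding means bumping queue_id and taking that stage's schedule type from a per-stage list. Reassembled as one helper; the parameters stand in for the test's globals:

```c
#include <rte_eventdev.h>

/* Reassembled from lines 33-35: step an event to the next stage queue
 * and give it that stage's schedule type. sched_type_list[] and
 * nb_stages come from the test's configuration. */
static inline void
fwd_event(struct rte_event *ev, const uint8_t sched_type_list[],
	  uint8_t nb_stages)
{
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
}
```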

test_pipeline_common.h
    63: struct rte_event ev __rte_cache_aligned
    82: struct rte_event ev __rte_cache_aligned
    99: ev->event_type = RTE_EVENT_TYPE_CPU;   in pipeline_fwd_event()
    100: ev->op = RTE_EVENT_OP_FORWARD;   in pipeline_fwd_event()
    101: ev->sched_type = sched;   in pipeline_fwd_event()
    107: ev->event_type = RTE_EVENT_TYPE_CPU_VECTOR;   in pipeline_fwd_event_vector()
    108: ev->op = RTE_EVENT_OP_FORWARD;   in pipeline_fwd_event_vector()
    109: ev->sched_type = sched;   in pipeline_fwd_event_vector()
    132: ev->vec->queue = 0;   in pipeline_event_tx_vector()
    150: ev + enq, nb_rx - enq, 0);   in pipeline_event_tx_burst()
    [all …]
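
Lines 99-109 show the shared forward helpers nearly in full: the scalar and vector variants are identical except for event_type, which tells the next stage whether the payload is a single mbuf or an event vector. Reassembled below; the signatures are assumed from the fragments:

```c
#include <rte_eventdev.h>

/* Reassembled from lines 99-101: forward a scalar (single-mbuf) event. */
static inline void
pipeline_fwd_event(struct rte_event *ev, uint8_t sched)
{
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = sched;
}

/* Reassembled from lines 107-109: same, but the payload is ev->vec. */
static inline void
pipeline_fwd_event_vector(struct rte_event *ev, uint8_t sched)
{
	ev->event_type = RTE_EVENT_TYPE_CPU_VECTOR;
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = sched;
}
```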

/dpdk/examples/eventdev_pipeline/

pipeline_worker_tx.c
    14: ev->sched_type = sched;   in worker_fwd_event()
    19: struct rte_event *ev)   in worker_event_enqueue() (argument)
    58: struct rte_event ev;   in worker_do_tx_single() (local)
    74: ev.queue_id++;   in worker_do_tx_single()
    81: if (ev.u64) {   in worker_do_tx_single()
    121: if (ev.u64) {   in worker_do_tx_single_atq()
    257: cdata.next_qid[ev.queue_id] : ev.queue_id;   in worker_do_tx()
    259: ev.queue_id = cdata.next_qid[ev.queue_id];   in worker_do_tx()
    268: if (ev.u64) {   in worker_do_tx()
    319: if (ev.u64) {   in worker_do_tx_atq()
    [all …]
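
Lines 257-259 show how this example decouples worker code from topology: a per-queue lookup table (cdata.next_qid) names each queue's successor, so the same loop serves any number of stages. A minimal sketch of that routing step, as a standalone helper with an assumed name:

```c
#include <rte_eventdev.h>

/* Hypothetical helper modeled on lines 257-259: route an event to its
 * current queue's successor. next_qid[] is the topology table that the
 * example keeps in its shared config (cdata). */
static inline void
route_to_next_queue(struct rte_event *ev, const uint8_t next_qid[])
{
	ev->queue_id = next_qid[ev->queue_id];
	ev->op = RTE_EVENT_OP_FORWARD;
}
```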

/dpdk/drivers/event/cnxk/

cn9k_worker.c
    15: switch (ev->op) {   in cn9k_sso_hws_enq()
    17: return cn9k_sso_hws_new_event(ws, ev);   in cn9k_sso_hws_enq()
    19: cn9k_sso_hws_forward_event(ws, ev);   in cn9k_sso_hws_enq()
    23: cnxk_sso_hws_desched(ev->u64, ws->base);   in cn9k_sso_hws_enq()
    41: return cn9k_sso_hws_enq(port, ev);   in cn9k_sso_hws_enq_burst()
    52: rc = cn9k_sso_hws_new_event(ws, &ev[i]);   in cn9k_sso_hws_enq_new_burst()
    64: cn9k_sso_hws_forward_event(ws, ev);   in cn9k_sso_hws_enq_fwd_burst()
    78: switch (ev->op) {   in cn9k_sso_hws_dual_enq()
    86: cnxk_sso_hws_desched(ev->u64, base);   in cn9k_sso_hws_dual_enq()
    104: return cn9k_sso_hws_dual_enq(port, ev);   in cn9k_sso_hws_dual_enq_burst()
    [all …]
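
Every cnxk enqueue entry point opens with the same dispatch on ev->op (line 15 here, line 78 in the dual variant, and again in cn10k_worker.c below). A sketch of that shape, with the driver's helpers replaced by hypothetical externs so the fragment stands alone:

```c
#include <rte_eventdev.h>

struct hws;                         /* stand-in for the per-port HW state */
extern uint16_t hws_new_event(struct hws *ws, const struct rte_event *ev);
extern void hws_forward_event(struct hws *ws, const struct rte_event *ev);
extern void hws_release(struct hws *ws);

/* Dispatch one event by op. NEW can be refused under backpressure, so it
 * alone propagates a count; FORWARD and RELEASE always complete. */
static uint16_t
hws_enq_dispatch(struct hws *ws, const struct rte_event *ev)
{
	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		return hws_new_event(ws, ev);
	case RTE_EVENT_OP_FORWARD:
		hws_forward_event(ws, ev);
		return 1;
	case RTE_EVENT_OP_RELEASE:
		hws_release(ws);
		return 1;
	default:
		return 0;
	}
}
```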

cn9k_worker.h
    27: const uint64_t event_ptr = ev->u64;   in cn9k_sso_hws_new_event()
    28: const uint16_t grp = ev->queue_id;   in cn9k_sso_hws_new_event()
    123: const struct rte_event *ev)   in cn9k_sso_hws_dual_new_event_wait() (argument)
    139: const struct rte_event *ev)   in cn9k_sso_hws_dual_forward_event() (argument)
    241: ev->event = gw.u64[0];   in cn9k_sso_hws_dual_get_work()
    242: ev->u64 = gw.u64[1];   in cn9k_sso_hws_dual_get_work()
    291: ev->event = gw.u64[0];   in cn9k_sso_hws_get_work()
    292: ev->u64 = gw.u64[1];   in cn9k_sso_hws_get_work()
    338: ev->event = gw.u64[0];   in cn9k_sso_hws_get_work_empty()
    339: ev->u64 = gw.u64[1];   in cn9k_sso_hws_get_work_empty()
    [all …]

cnxk_eventdev_selftest.c
    267: ev->mbuf = m;   in update_event_and_validation_attr()
    297: struct rte_event ev;   in check_excess_events() (local)
    372: struct rte_event ev;   in consume_events() (local)
    907: ev.flow_id = 0x2;   in worker_flow_based_pipeline()
    1223: ev.sched_type =   in worker_flow_based_pipeline_max_stages_rand_sched_type()
    1298: ev.queue_id++;   in worker_queue_based_pipeline_max_stages_rand_sched_type()
    1299: ev.sched_type =   in worker_queue_based_pipeline_max_stages_rand_sched_type()
    1343: ev.queue_id++;   in worker_mixed_pipeline_max_stages_rand_sched_type()
    1345: ev.sched_type =   in worker_mixed_pipeline_max_stages_rand_sched_type()
    1386: ev.queue_id = 0;   in worker_ordered_flow_producer()
    [all …]

cn10k_worker.h
    26: const uint64_t event_ptr = ev->u64;   in cn10k_sso_hws_new_event()
    27: const uint16_t grp = ev->queue_id;   in cn10k_sso_hws_new_event()
    78: const struct rte_event *ev)   in cn10k_sso_hws_forward_event() (argument)
    80: const uint8_t grp = ev->queue_id;   in cn10k_sso_hws_forward_event()
    84: cn10k_sso_hws_fwd_swtag(ws, ev);   in cn10k_sso_hws_forward_event()
    300: ev->event = gw.u64[0];   in cn10k_sso_hws_get_work()
    301: ev->u64 = gw.u64[1];   in cn10k_sso_hws_get_work()
    339: ev->event = gw.u64[0];   in cn10k_sso_hws_get_work_empty()
    340: ev->u64 = gw.u64[1];   in cn10k_sso_hws_get_work_empty()
    652: rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);   in cn10k_sso_hws_event_tx()
    [all …]

cn10k_worker.c
    10: cn10k_sso_hws_enq(void *port, const struct rte_event *ev)   in cn10k_sso_hws_enq() (argument)
    14: switch (ev->op) {   in cn10k_sso_hws_enq()
    16: return cn10k_sso_hws_new_event(ws, ev);   in cn10k_sso_hws_enq()
    18: cn10k_sso_hws_forward_event(ws, ev);   in cn10k_sso_hws_enq()
    22: cnxk_sso_hws_desched(ev->u64, ws->base);   in cn10k_sso_hws_enq()
    36: cn10k_sso_hws_enq_burst(void *port, const struct rte_event ev[],   in cn10k_sso_hws_enq_burst() (argument)
    40: return cn10k_sso_hws_enq(port, ev);   in cn10k_sso_hws_enq_burst()
    44: cn10k_sso_hws_enq_new_burst(void *port, const struct rte_event ev[],   in cn10k_sso_hws_enq_new_burst() (argument)
    51: rc = cn10k_sso_hws_new_event(ws, &ev[i]);   in cn10k_sso_hws_enq_new_burst()
    63: cn10k_sso_hws_forward_event(ws, ev);   in cn10k_sso_hws_enq_fwd_burst()
    [all …]

/dpdk/drivers/event/octeontx/

ssovf_evdev_selftest.c
    286: ev->mbuf = m;   in update_event_and_validation_attr()
    316: struct rte_event ev;   in check_excess_events() (local)
    392: struct rte_event ev;   in consume_events() (local)
    902: ev.flow_id = 0x2;   in worker_flow_based_pipeline()
    1217: ev.sched_type =   in worker_flow_based_pipeline_max_stages_rand_sched_type()
    1291: ev.queue_id++;   in worker_queue_based_pipeline_max_stages_rand_sched_type()
    1292: ev.sched_type =   in worker_queue_based_pipeline_max_stages_rand_sched_type()
    1333: ev.queue_id++;   in worker_mixed_pipeline_max_stages_rand_sched_type()
    1335: ev.sched_type =   in worker_mixed_pipeline_max_stages_rand_sched_type()
    1375: ev.queue_id = 0;   in worker_ordered_flow_producer()
    [all …]

ssovf_worker.c
    10: const uint64_t event_ptr = ev->u64;   in ssows_new_event()
    13: const uint8_t grp = ev->queue_id;   in ssows_new_event()
    157: switch (ev->op) {   in ssows_enq()
    160: ssows_new_event(ws, ev);   in ssows_enq()
    210: struct rte_event ev;   in ssows_flush_events() (local)
    244: ev.u64 = get_work1;   in ssows_flush_events()
    247: fn(arg, ev);   in ssows_flush_events()
    286: switch (ev->sched_type) {   in __sso_event_tx_adapter_enqueue()
    293: ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,   in __sso_event_tx_adapter_enqueue()
    294: ev->queue_id);   in __sso_event_tx_adapter_enqueue()
    [all …]

/dpdk/app/test/

test_event_timer_adapter.c
    437: struct rte_event ev;   in test_timer_state() (local)
    439: .ev.op = RTE_EVENT_OP_NEW,   in test_timer_state()
    440: .ev.queue_id = 0,   in test_timer_state()
    497: .ev.queue_id = 0,   in _arm_timers()
    528: struct rte_event ev;   in _wait_timer_triggers() (local)
    611: .ev.queue_id = 0,   in _arm_timers_burst()
    694: .ev.queue_id = 0,   in test_timer_cancel_periodic()
    736: .ev.queue_id = 0,   in test_timer_cancel()
    778: .ev.queue_id = 0,   in _cancel_producer()
    817: .ev.queue_id = 0,   in _cancel_producer_burst()
    [all …]
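
The recurring .ev.op/.ev.queue_id designated initializers configure the event that the timer adapter injects when a timer expires. A sketch of arming one timer that way; the adapter pointer is assumed to come from test setup and the tick value is arbitrary:

```c
#include <rte_event_timer_adapter.h>

/* Arm a single timer whose expiry event lands on queue 0. */
static int
arm_one_timer(const struct rte_event_timer_adapter *adapter)
{
	struct rte_event_timer tim = {
		.ev.op = RTE_EVENT_OP_NEW,
		.ev.queue_id = 0,
		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.ev.event_type = RTE_EVENT_TYPE_TIMER,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = 30,    /* expiry, in adapter tick units */
	};
	struct rte_event_timer *tims[] = { &tim };

	return rte_event_timer_arm_burst(adapter, tims, 1) == 1 ? 0 : -1;
}
```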

test_event_eth_rx_adapter.c
    459: ev.queue_id = 0;   in adapter_queue_event_buf_test()
    461: ev.priority = 0;   in adapter_queue_event_buf_test()
    465: ev.flow_id = 1;   in adapter_queue_event_buf_test()
    469: queue_config.ev = ev;   in adapter_queue_event_buf_test()
    515: ev.queue_id = 0;   in adapter_queue_stats_test()
    517: ev.priority = 0;   in adapter_queue_stats_test()
    525: queue_config.ev = ev;   in adapter_queue_stats_test()
    659: queue_config.ev = ev;   in adapter_queue_add_del()
    738: queue_config.ev = ev;   in adapter_multi_eth_add_del()
    816: queue_config.ev = ev;   in adapter_intr_queue_add_del()
    [all …]
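
The queue_config.ev assignments show the Rx adapter's template mechanism: the event embedded in the queue conf is stamped onto every packet the adapter pulls from that Rx queue. A sketch of adding a queue that way; the ids are hypothetical:

```c
#include <rte_event_eth_rx_adapter.h>

/* Bind one ethdev Rx queue to an Rx adapter, injecting its packets as
 * atomic events on event queue 0. */
static int
add_rx_queue(uint8_t adapter_id, uint16_t eth_port, int32_t rx_queue)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf = {
		.rx_queue_flags = 0,
		.servicing_weight = 1,
		.ev = {
			.queue_id = 0,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.sched_type = RTE_SCHED_TYPE_ATOMIC,
		},
	};

	return rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port,
						  rx_queue, &qconf);
}
```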

/dpdk/drivers/event/dpaa2/

dpaa2_eventdev_selftest.c
    235: ev->flow_id = flow_id;   in update_event_and_validation_attr()
    241: ev->queue_id = queue;   in update_event_and_validation_attr()
    242: ev->mbuf = m;   in update_event_and_validation_attr()
    271: struct rte_event ev;   in check_excess_events() (local)
    279: *dpaa2_seqn(ev.mbuf));   in check_excess_events()
    347: struct rte_event ev;   in consume_events() (local)
    444: struct rte_event ev;   in worker_multi_port_fn() (local)
    618: struct rte_event *ev)   in validate_queue_to_port_single_link() (argument)
    623: port, ev->queue_id);   in validate_queue_to_port_single_link()
    690: struct rte_event *ev)   in validate_queue_to_port_multi_link() (argument)
    [all …]

/dpdk/drivers/event/dlb2/

dlb2_selftest.c
    233: ev.queue_id = 0;   in test_stop_flush()
    245: ev.queue_id = 1;   in test_stop_flush()
    891: ev.queue_id = 0;   in test_load_balanced_traffic()
    892: ev.priority = 0;   in test_load_balanced_traffic()
    893: ev.u64 = 0;   in test_load_balanced_traffic()
    1018: ev.queue_id = 0;   in test_directed_traffic()
    1019: ev.priority = 0;   in test_directed_traffic()
    1020: ev.u64 = 0;   in test_directed_traffic()
    1178: ev.queue_id = 0;   in test_deferred_sched()
    1180: ev.u64 = 0;   in test_deferred_sched()
    [all …]

/dpdk/drivers/event/sw/

sw_evdev_selftest.c
    940: ev.mbuf = arp;   in xstats_tests()
    941: ev.flow_id = 7;   in xstats_tests()
    1492: ev.mbuf = arp;   in xstats_id_reset_tests()
    1882: ev.mbuf = arp;   in qid_priorities()
    2378: ev.mbuf = arp;   in single_packet()
    2380: ev.flow_id = 3;   in single_packet()
    2494: ev.mbuf = arp;   in inflight_counts()
    2511: ev.mbuf = arp;   in inflight_counts()
    2832: ev = new_ev;   in holb()
    2842: ev = new_ev;   in holb()
    [all …]

sw_evdev_worker.c
    23: struct rte_event ev;   in sw_event_release() (local)
    24: ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];   in sw_event_release()
    27: rte_event_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);   in sw_event_release()
    67: new += (ev[i].op == RTE_EVENT_OP_NEW);   in sw_event_enqueue_burst()
    87: int op = ev[i].op;   in sw_event_enqueue_burst()
    89: const uint8_t invalid_qid = (ev[i].queue_id >= sw->qid_count);   in sw_event_enqueue_burst()
    113: uint32_t enq = enqueue_burst_with_ops(p->rx_worker_ring, ev, i,   in sw_event_enqueue_burst()
    135: sw_event_enqueue(void *port, const struct rte_event *ev)   in sw_event_enqueue() (argument)
    137: return sw_event_enqueue_burst(port, ev, 1);   in sw_event_enqueue()
    183: sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait)   in sw_event_dequeue() (argument)
    [all …]
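
Line 89 is the software eventdev's per-event sanity check: NEW/FORWARD ops must name an existing queue, while RELEASE carries no destination and is exempt. A small sketch of that guard, as an assumed standalone helper:

```c
#include <stdbool.h>
#include <rte_eventdev.h>

/* Hypothetical predicate modeled on line 89: reject out-of-range
 * queue ids, except for RELEASE which has no destination queue. */
static inline bool
event_qid_valid(const struct rte_event *ev, uint32_t qid_count)
{
	if (ev->op == RTE_EVENT_OP_RELEASE)
		return true;
	return ev->queue_id < qid_count;
}
```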

/dpdk/drivers/event/opdl/

opdl_test.c
    249: struct rte_event ev;   in ordered_basic() (local)
    258: ev.mbuf = mbufs[i];   in ordered_basic()
    397: ev.flow_id = 1;   in atomic_basic()
    398: ev.mbuf = mbufs[i];   in atomic_basic()
    760: ev[i].flow_id = 1;   in populate_event_burst()
    839: ev,   in qid_basic()
    854: ev,   in qid_basic()
    903: ev,   in qid_basic()
    918: ev,   in qid_basic()
    948: ev,   in qid_basic()
    [all …]

/dpdk/lib/port/

rte_port_eventdev.c
    74: p->ev, n_pkts, 0);   in rte_port_eventdev_reader_rx()
    77: pkts[i] = p->ev[i].mbuf;   in rte_port_eventdev_reader_rx()
    180: memset(&port->ev, 0, sizeof(port->ev));   in rte_port_eventdev_writer_create()
    185: port->ev[i].op = port->evt_op;   in rte_port_eventdev_writer_create()
    197: p->ev, p->enq_buf_count);   in send_burst()
    203: rte_pktmbuf_free(p->ev[nb_enq].mbuf);   in send_burst()
    213: p->ev[p->enq_buf_count++].mbuf = pkt;   in rte_port_eventdev_writer_tx()
    387: memset(&port->ev, 0, sizeof(port->ev));   in rte_port_eventdev_writer_nodrop_create()
    392: port->ev[i].op = port->evt_op;   in rte_port_eventdev_writer_nodrop_create()
    410: p->ev, p->enq_buf_count);   in send_burst_nodrop()
    [all …]
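
Lines 197-203 are the lossy writer's flush: enqueue the buffered events once and free whatever the device refuses (the nodrop variant at line 410 retries instead). A sketch of that flush with assumed dev/port parameters in place of the port struct:

```c
#include <rte_eventdev.h>
#include <rte_mbuf.h>

/* Flush a writer's event buffer; unaccepted tail entries are dropped
 * by freeing their mbufs, mirroring send_burst() above. */
static void
send_burst_sketch(uint8_t dev_id, uint8_t port_id,
		  struct rte_event ev[], uint16_t count)
{
	uint16_t nb_enq = rte_event_enqueue_burst(dev_id, port_id, ev, count);

	for (; nb_enq < count; nb_enq++)
		rte_pktmbuf_free(ev[nb_enq].mbuf);
}
```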

/dpdk/examples/l2fwd-event/

l2fwd_event.c
    159: struct rte_mbuf *mbuf = ev->mbuf;   in l2fwd_event_fwd()
    174: ev->queue_id = tx_q_id;   in l2fwd_event_fwd()
    175: ev->op = RTE_EVENT_OP_FORWARD;   in l2fwd_event_fwd()
    197: struct rte_event ev;   in l2fwd_event_loop_single() (local)
    216: port_id, &ev, 1);   in l2fwd_event_loop_single()
    242: struct rte_event ev[MAX_PKT_BURST];   in l2fwd_event_loop_burst() (local)
    268: ev, nb_rx);   in l2fwd_event_loop_burst()
    271: port_id, ev + nb_tx,   in l2fwd_event_loop_burst()
    278: port_id, ev,   in l2fwd_event_loop_burst()
    283: ev + nb_tx, nb_rx - nb_tx, 0);   in l2fwd_event_loop_burst()
    [all …]
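
Lines 268-283 show the burst loop's back half: after a partial enqueue, keep re-offering the unsent tail until the device accepts everything. A sketch of that retry with hypothetical dev/port ids:

```c
#include <rte_eventdev.h>

/* Forward a burst, then retry the unaccepted tail until it drains. */
static void
enqueue_all(uint8_t dev_id, uint8_t port_id,
	    struct rte_event ev[], uint16_t nb_rx)
{
	uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id, ev, nb_rx);

	while (nb_tx < nb_rx)
		nb_tx += rte_event_enqueue_burst(dev_id, port_id,
						 ev + nb_tx, nb_rx - nb_tx);
}
```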

/dpdk/examples/ipsec-secgw/

ipsec_worker.c
    296: pkt = ev->mbuf;   in process_ipsec_ev_inbound()
    380: ev->mbuf = NULL;   in process_ipsec_ev_inbound()
    398: pkt = ev->mbuf;   in process_ipsec_ev_outbound()
    482: ev->mbuf = NULL;   in process_ipsec_ev_outbound()
    707: ev, 1, 0);   in ipsec_ev_vector_process()
    729: ev, 1, 0);   in ipsec_ev_vector_drv_mode_process()
    821: ev.event_type);   in ipsec_wrkr_non_burst_int_port_drv_mode()
    825: pkt = ev.mbuf;   in ipsec_wrkr_non_burst_int_port_drv_mode()
    866: if (ev.u64) {   in ipsec_wrkr_non_burst_int_port_drv_mode()
    954: ev.event_type);   in ipsec_wrkr_non_burst_int_port_app_mode()
    [all …]