Lines Matching refs:ev
13 order_atq_process_stage_0(struct rte_event *const ev) in order_atq_process_stage_0() argument
15 ev->sub_event_type = 1; /* move to stage 1 (atomic) on the same queue */ in order_atq_process_stage_0()
16 ev->op = RTE_EVENT_OP_FORWARD; in order_atq_process_stage_0()
17 ev->sched_type = RTE_SCHED_TYPE_ATOMIC; in order_atq_process_stage_0()
18 ev->event_type = RTE_EVENT_TYPE_CPU; in order_atq_process_stage_0()
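
The matches above cover essentially the whole body of order_atq_process_stage_0(). A minimal reconstruction is sketched below; the static __rte_always_inline qualifier and the include are assumptions, the four assignments come straight from the matched lines. The helper re-tags a stage-0 event for re-injection on the same all-types queue (ATQ): the stage change is expressed only through sub_event_type, with the event forwarded under atomic scheduling rather than sent to a new queue.

#include <rte_eventdev.h>

/* Sketch, assuming the usual inline-helper style of the test code. */
static __rte_always_inline void
order_atq_process_stage_0(struct rte_event *const ev)
{
	ev->sub_event_type = 1; /* move to stage 1 (atomic) on the same queue */
	ev->op = RTE_EVENT_OP_FORWARD;          /* forward, do not consume, the event */
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC; /* stage 1 runs under atomic ordering */
	ev->event_type = RTE_EVENT_TYPE_CPU;    /* now CPU-generated, not from the producer */
}
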
25 struct rte_event ev; in order_atq_worker() local
29 &ev, 1, 0); in order_atq_worker()
38 order_flow_id_copy_from_mbuf(t, &ev); in order_atq_worker()
40 if (ev.sub_event_type == 0) { /* stage 0 from producer */ in order_atq_worker()
41 order_atq_process_stage_0(&ev); in order_atq_worker()
42 while (rte_event_enqueue_burst(dev_id, port, &ev, 1) in order_atq_worker()
45 } else if (ev.sub_event_type == 1) { /* stage 1 */ in order_atq_worker()
46 order_process_stage_1(t, &ev, nb_flows, in order_atq_worker()
49 order_process_stage_invalid(t, &ev); in order_atq_worker()
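
The order_atq_worker() matches outline a single-event dequeue/dispatch loop. A hedged sketch of that loop body follows; the termination condition, the flow_id_cap check, the rte_pause() in the retry loop, and the trailing arguments of order_process_stage_1() (expected_flow_seq, outstand_pkts) are assumptions filled in around the matched lines, and t, dev_id, port, nb_flows and the stage helpers are taken as provided by the surrounding test harness.

/* Sketch only: harness state (t, dev_id, port, nb_flows, flow_id_cap,
 * expected_flow_seq, outstand_pkts) is assumed to be set up by the caller. */
struct rte_event ev;

while (t->err == false) {
	uint16_t event = rte_event_dequeue_burst(dev_id, port, &ev, 1, 0);
	if (!event)
		continue; /* assumption: the real loop also checks for test completion */

	if (!flow_id_cap) /* PMD cannot carry the flow id: recover it from the mbuf */
		order_flow_id_copy_from_mbuf(t, &ev);

	if (ev.sub_event_type == 0) { /* stage 0 from producer */
		order_atq_process_stage_0(&ev);
		/* retry until the single forwarded event is accepted */
		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1)
			rte_pause();
	} else if (ev.sub_event_type == 1) { /* stage 1 */
		order_process_stage_1(t, &ev, nb_flows,
				expected_flow_seq, outstand_pkts);
	} else {
		order_process_stage_invalid(t, &ev);
	}
}
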
59 struct rte_event ev[BURST_SIZE]; in order_atq_worker_burst() local
63 uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev, in order_atq_worker_burst()
75 order_flow_id_copy_from_mbuf(t, &ev[i]); in order_atq_worker_burst()
77 if (ev[i].sub_event_type == 0) { /* stage 0 */ in order_atq_worker_burst()
78 order_atq_process_stage_0(&ev[i]); in order_atq_worker_burst()
79 } else if (ev[i].sub_event_type == 1) { /* stage 1 */ in order_atq_worker_burst()
80 order_process_stage_1(t, &ev[i], nb_flows, in order_atq_worker_burst()
82 ev[i].op = RTE_EVENT_OP_RELEASE; in order_atq_worker_burst()
84 order_process_stage_invalid(t, &ev[i]); in order_atq_worker_burst()
90 enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx); in order_atq_worker_burst()
93 ev + enq, nb_rx - enq); in order_atq_worker_burst()
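
The order_atq_worker_burst() matches add the burst variant: dequeue up to BURST_SIZE events, run each through the same sub_event_type dispatch (finished stage-1 events hand their atomic context back with RTE_EVENT_OP_RELEASE), then enqueue the whole batch and keep pushing the tail on a partial enqueue. The sketch below uses the same assumed harness context as the single-event worker; the loop condition and per-iteration bookkeeping around the matched lines are filled in, not verbatim.

/* Sketch only: same assumed harness state as the single-event worker. */
struct rte_event ev[BURST_SIZE];
uint16_t i;

while (t->err == false) {
	uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
			BURST_SIZE, 0);
	if (!nb_rx)
		continue; /* assumption: the real loop also checks for completion */

	for (i = 0; i < nb_rx; i++) {
		if (!flow_id_cap)
			order_flow_id_copy_from_mbuf(t, &ev[i]);

		if (ev[i].sub_event_type == 0) { /* stage 0 */
			order_atq_process_stage_0(&ev[i]);
		} else if (ev[i].sub_event_type == 1) { /* stage 1 */
			order_process_stage_1(t, &ev[i], nb_flows,
					expected_flow_seq, outstand_pkts);
			ev[i].op = RTE_EVENT_OP_RELEASE; /* done: release the atomic context */
		} else {
			order_process_stage_invalid(t, &ev[i]);
		}
	}

	/* enqueue the batch; on a partial enqueue retry the remainder */
	uint16_t enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
	while (enq < nb_rx)
		enq += rte_event_enqueue_burst(dev_id, port,
				ev + enq, nb_rx - enq);
}
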