xref: /f-stack/dpdk/drivers/event/dsw/dsw_evdev.c (revision 2d9fd380)
1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2d30ea906Sjfb8856606  * Copyright(c) 2018 Ericsson AB
3d30ea906Sjfb8856606  */
4d30ea906Sjfb8856606 
5d30ea906Sjfb8856606 #include <stdbool.h>
6d30ea906Sjfb8856606 
7d30ea906Sjfb8856606 #include <rte_cycles.h>
8d30ea906Sjfb8856606 #include <rte_eventdev_pmd.h>
9d30ea906Sjfb8856606 #include <rte_eventdev_pmd_vdev.h>
10d30ea906Sjfb8856606 #include <rte_random.h>
11*2d9fd380Sjfb8856606 #include <rte_ring_elem.h>
12d30ea906Sjfb8856606 
13d30ea906Sjfb8856606 #include "dsw_evdev.h"
14d30ea906Sjfb8856606 
15d30ea906Sjfb8856606 #define EVENTDEV_NAME_DSW_PMD event_dsw
16d30ea906Sjfb8856606 
static int
dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
	       const struct rte_event_port_conf *conf)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	struct dsw_port *port;
	struct rte_event_ring *in_ring;
	struct rte_ring *ctl_in_ring;
	char ring_name[RTE_RING_NAMESIZE];

	port = &dsw->ports[port_id];

	/* Reset the port state; the compound literal zeroes all fields
	 * not explicitly listed.
	 */
	*port = (struct dsw_port) {
		.id = port_id,
		.dsw = dsw,
		.dequeue_depth = conf->dequeue_depth,
		.enqueue_depth = conf->enqueue_depth,
		.new_event_threshold = conf->new_event_threshold
	};

	snprintf(ring_name, sizeof(ring_name), "dsw%d_p%u", dev->data->dev_id,
		 port_id);

	/* Per-port event input ring. RING_F_SC_DEQ: only the owning
	 * port dequeues; RING_F_EXACT_SZ: usable capacity is exactly
	 * DSW_IN_RING_SIZE.
	 */
	in_ring = rte_event_ring_create(ring_name, DSW_IN_RING_SIZE,
					dev->data->socket_id,
					RING_F_SC_DEQ|RING_F_EXACT_SZ);

	if (in_ring == NULL)
		return -ENOMEM;

	snprintf(ring_name, sizeof(ring_name), "dswctl%d_p%u",
		 dev->data->dev_id, port_id);

	/* Control ring carrying fixed-size dsw_ctl_msg elements used
	 * for inter-port signaling (e.g. flow migration).
	 */
	ctl_in_ring = rte_ring_create_elem(ring_name,
					   sizeof(struct dsw_ctl_msg),
					   DSW_CTL_IN_RING_SIZE,
					   dev->data->socket_id,
					   RING_F_SC_DEQ|RING_F_EXACT_SZ);

	if (ctl_in_ring == NULL) {
		/* Undo the earlier allocation before bailing out. */
		rte_event_ring_free(in_ring);
		return -ENOMEM;
	}

	port->in_ring = in_ring;
	port->ctl_in_ring = ctl_in_ring;

	rte_atomic16_init(&port->load);
	rte_atomic32_init(&port->immigration_load);

	/* Convert the microsecond-denominated intervals into TSC cycles. */
	port->load_update_interval =
		(DSW_LOAD_UPDATE_INTERVAL * rte_get_timer_hz()) / US_PER_S;

	port->migration_interval =
		(DSW_MIGRATION_INTERVAL * rte_get_timer_hz()) / US_PER_S;

	dev->data->ports[port_id] = port;

	return 0;
}
77d30ea906Sjfb8856606 
78d30ea906Sjfb8856606 static void
dsw_port_def_conf(struct rte_eventdev * dev __rte_unused,uint8_t port_id __rte_unused,struct rte_event_port_conf * port_conf)79d30ea906Sjfb8856606 dsw_port_def_conf(struct rte_eventdev *dev __rte_unused,
80d30ea906Sjfb8856606 		  uint8_t port_id __rte_unused,
81d30ea906Sjfb8856606 		  struct rte_event_port_conf *port_conf)
82d30ea906Sjfb8856606 {
83d30ea906Sjfb8856606 	*port_conf = (struct rte_event_port_conf) {
84d30ea906Sjfb8856606 		.new_event_threshold = 1024,
85d30ea906Sjfb8856606 		.dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH / 4,
86d30ea906Sjfb8856606 		.enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH / 4
87d30ea906Sjfb8856606 	};
88d30ea906Sjfb8856606 }
89d30ea906Sjfb8856606 
90d30ea906Sjfb8856606 static void
dsw_port_release(void * p)91d30ea906Sjfb8856606 dsw_port_release(void *p)
92d30ea906Sjfb8856606 {
93d30ea906Sjfb8856606 	struct dsw_port *port = p;
94d30ea906Sjfb8856606 
95d30ea906Sjfb8856606 	rte_event_ring_free(port->in_ring);
96d30ea906Sjfb8856606 	rte_ring_free(port->ctl_in_ring);
97d30ea906Sjfb8856606 }
98d30ea906Sjfb8856606 
99d30ea906Sjfb8856606 static int
dsw_queue_setup(struct rte_eventdev * dev,uint8_t queue_id,const struct rte_event_queue_conf * conf)100d30ea906Sjfb8856606 dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
101d30ea906Sjfb8856606 		const struct rte_event_queue_conf *conf)
102d30ea906Sjfb8856606 {
103d30ea906Sjfb8856606 	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
104d30ea906Sjfb8856606 	struct dsw_queue *queue = &dsw->queues[queue_id];
105d30ea906Sjfb8856606 
106d30ea906Sjfb8856606 	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
107d30ea906Sjfb8856606 		return -ENOTSUP;
108d30ea906Sjfb8856606 
109d30ea906Sjfb8856606 	/* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
110d30ea906Sjfb8856606 	 * avoid the "fake" TYPE_PARALLEL flow_id assignment. Since
111d30ea906Sjfb8856606 	 * the queue will only have a single serving port, no
112d30ea906Sjfb8856606 	 * migration will ever happen, so the extra TYPE_ATOMIC
113d30ea906Sjfb8856606 	 * migration overhead is avoided.
114d30ea906Sjfb8856606 	 */
115d30ea906Sjfb8856606 	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
116d30ea906Sjfb8856606 		queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
1171646932aSjfb8856606 	else {
1181646932aSjfb8856606 		if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
1191646932aSjfb8856606 			return -ENOTSUP;
1201646932aSjfb8856606 		/* atomic or parallel */
121d30ea906Sjfb8856606 		queue->schedule_type = conf->schedule_type;
1221646932aSjfb8856606 	}
123d30ea906Sjfb8856606 
124d30ea906Sjfb8856606 	queue->num_serving_ports = 0;
125d30ea906Sjfb8856606 
126d30ea906Sjfb8856606 	return 0;
127d30ea906Sjfb8856606 }
128d30ea906Sjfb8856606 
129d30ea906Sjfb8856606 static void
dsw_queue_def_conf(struct rte_eventdev * dev __rte_unused,uint8_t queue_id __rte_unused,struct rte_event_queue_conf * queue_conf)130d30ea906Sjfb8856606 dsw_queue_def_conf(struct rte_eventdev *dev __rte_unused,
131d30ea906Sjfb8856606 		   uint8_t queue_id __rte_unused,
132d30ea906Sjfb8856606 		   struct rte_event_queue_conf *queue_conf)
133d30ea906Sjfb8856606 {
134d30ea906Sjfb8856606 	*queue_conf = (struct rte_event_queue_conf) {
135d30ea906Sjfb8856606 		.nb_atomic_flows = 4096,
136d30ea906Sjfb8856606 		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
137d30ea906Sjfb8856606 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
138d30ea906Sjfb8856606 	};
139d30ea906Sjfb8856606 }
140d30ea906Sjfb8856606 
static void
dsw_queue_release(struct rte_eventdev *dev __rte_unused,
		  uint8_t queue_id __rte_unused)
{
	/* Nothing to free: queue state lives inside the device's private
	 * data area and dsw_queue_setup() allocates no resources.
	 */
}
146d30ea906Sjfb8856606 
147d30ea906Sjfb8856606 static void
queue_add_port(struct dsw_queue * queue,uint16_t port_id)148d30ea906Sjfb8856606 queue_add_port(struct dsw_queue *queue, uint16_t port_id)
149d30ea906Sjfb8856606 {
150d30ea906Sjfb8856606 	queue->serving_ports[queue->num_serving_ports] = port_id;
151d30ea906Sjfb8856606 	queue->num_serving_ports++;
152d30ea906Sjfb8856606 }
153d30ea906Sjfb8856606 
154d30ea906Sjfb8856606 static bool
queue_remove_port(struct dsw_queue * queue,uint16_t port_id)155d30ea906Sjfb8856606 queue_remove_port(struct dsw_queue *queue, uint16_t port_id)
156d30ea906Sjfb8856606 {
157d30ea906Sjfb8856606 	uint16_t i;
158d30ea906Sjfb8856606 
159d30ea906Sjfb8856606 	for (i = 0; i < queue->num_serving_ports; i++)
160d30ea906Sjfb8856606 		if (queue->serving_ports[i] == port_id) {
161d30ea906Sjfb8856606 			uint16_t last_idx = queue->num_serving_ports - 1;
162d30ea906Sjfb8856606 			if (i != last_idx)
163d30ea906Sjfb8856606 				queue->serving_ports[i] =
164d30ea906Sjfb8856606 					queue->serving_ports[last_idx];
165d30ea906Sjfb8856606 			queue->num_serving_ports--;
166d30ea906Sjfb8856606 			return true;
167d30ea906Sjfb8856606 		}
168d30ea906Sjfb8856606 	return false;
169d30ea906Sjfb8856606 }
170d30ea906Sjfb8856606 
171d30ea906Sjfb8856606 static int
dsw_port_link_unlink(struct rte_eventdev * dev,void * port,const uint8_t queues[],uint16_t num,bool link)172d30ea906Sjfb8856606 dsw_port_link_unlink(struct rte_eventdev *dev, void *port,
173d30ea906Sjfb8856606 		     const uint8_t queues[], uint16_t num, bool link)
174d30ea906Sjfb8856606 {
175d30ea906Sjfb8856606 	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
176d30ea906Sjfb8856606 	struct dsw_port *p = port;
177d30ea906Sjfb8856606 	uint16_t i;
178d30ea906Sjfb8856606 	uint16_t count = 0;
179d30ea906Sjfb8856606 
180d30ea906Sjfb8856606 	for (i = 0; i < num; i++) {
181d30ea906Sjfb8856606 		uint8_t qid = queues[i];
182d30ea906Sjfb8856606 		struct dsw_queue *q = &dsw->queues[qid];
183d30ea906Sjfb8856606 		if (link) {
184d30ea906Sjfb8856606 			queue_add_port(q, p->id);
185d30ea906Sjfb8856606 			count++;
186d30ea906Sjfb8856606 		} else {
187d30ea906Sjfb8856606 			bool removed = queue_remove_port(q, p->id);
188d30ea906Sjfb8856606 			if (removed)
189d30ea906Sjfb8856606 				count++;
190d30ea906Sjfb8856606 		}
191d30ea906Sjfb8856606 	}
192d30ea906Sjfb8856606 
193d30ea906Sjfb8856606 	return count;
194d30ea906Sjfb8856606 }
195d30ea906Sjfb8856606 
static int
dsw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
	      const uint8_t priorities[] __rte_unused, uint16_t num)
{
	/* Queue-to-port priorities are not supported by DSW; delegate
	 * to the shared link/unlink helper.
	 */
	return dsw_port_link_unlink(dev, port, queues, num, true);
}
202d30ea906Sjfb8856606 
static int
dsw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
		uint16_t num)
{
	/* Returns the number of queues that were actually unlinked. */
	return dsw_port_link_unlink(dev, port, queues, num, false);
}
209d30ea906Sjfb8856606 
static void
dsw_info_get(struct rte_eventdev *dev __rte_unused,
	     struct rte_event_dev_info *info)
{
	/* Report DSW's static limits and capabilities. Fields not
	 * listed in the initializer are zeroed.
	 */
	*info = (struct rte_event_dev_info) {
		.driver_name = DSW_PMD_NAME,
		.max_event_queues = DSW_MAX_QUEUES,
		.max_event_queue_flows = DSW_MAX_FLOWS,
		/* Only a single priority level for queues and events. */
		.max_event_queue_priority_levels = 1,
		.max_event_priority_levels = 1,
		.max_event_ports = DSW_MAX_PORTS,
		.max_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH,
		.max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
		.max_num_events = DSW_MAX_EVENTS,
		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
		RTE_EVENT_DEV_CAP_NONSEQ_MODE|
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|
		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID
	};
}
231d30ea906Sjfb8856606 
232d30ea906Sjfb8856606 static int
dsw_configure(const struct rte_eventdev * dev)233d30ea906Sjfb8856606 dsw_configure(const struct rte_eventdev *dev)
234d30ea906Sjfb8856606 {
235d30ea906Sjfb8856606 	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
236d30ea906Sjfb8856606 	const struct rte_event_dev_config *conf = &dev->data->dev_conf;
237d30ea906Sjfb8856606 	int32_t min_max_in_flight;
238d30ea906Sjfb8856606 
239d30ea906Sjfb8856606 	dsw->num_ports = conf->nb_event_ports;
240d30ea906Sjfb8856606 	dsw->num_queues = conf->nb_event_queues;
241d30ea906Sjfb8856606 
242d30ea906Sjfb8856606 	/* Avoid a situation where consumer ports are holding all the
243d30ea906Sjfb8856606 	 * credits, without making use of them.
244d30ea906Sjfb8856606 	 */
245d30ea906Sjfb8856606 	min_max_in_flight = conf->nb_event_ports * DSW_PORT_MAX_CREDITS;
246d30ea906Sjfb8856606 
247d30ea906Sjfb8856606 	dsw->max_inflight = RTE_MAX(conf->nb_events_limit, min_max_in_flight);
248d30ea906Sjfb8856606 
249d30ea906Sjfb8856606 	return 0;
250d30ea906Sjfb8856606 }
251d30ea906Sjfb8856606 
252d30ea906Sjfb8856606 
253d30ea906Sjfb8856606 static void
initial_flow_to_port_assignment(struct dsw_evdev * dsw)254d30ea906Sjfb8856606 initial_flow_to_port_assignment(struct dsw_evdev *dsw)
255d30ea906Sjfb8856606 {
256d30ea906Sjfb8856606 	uint8_t queue_id;
257d30ea906Sjfb8856606 	for (queue_id = 0; queue_id < dsw->num_queues; queue_id++) {
258d30ea906Sjfb8856606 		struct dsw_queue *queue = &dsw->queues[queue_id];
259d30ea906Sjfb8856606 		uint16_t flow_hash;
260d30ea906Sjfb8856606 		for (flow_hash = 0; flow_hash < DSW_MAX_FLOWS; flow_hash++) {
261d30ea906Sjfb8856606 			uint8_t port_idx =
262d30ea906Sjfb8856606 				rte_rand() % queue->num_serving_ports;
263d30ea906Sjfb8856606 			uint8_t port_id =
264d30ea906Sjfb8856606 				queue->serving_ports[port_idx];
265d30ea906Sjfb8856606 			dsw->queues[queue_id].flow_to_port_map[flow_hash] =
266d30ea906Sjfb8856606 				port_id;
267d30ea906Sjfb8856606 		}
268d30ea906Sjfb8856606 	}
269d30ea906Sjfb8856606 }
270d30ea906Sjfb8856606 
271d30ea906Sjfb8856606 static int
dsw_start(struct rte_eventdev * dev)272d30ea906Sjfb8856606 dsw_start(struct rte_eventdev *dev)
273d30ea906Sjfb8856606 {
274d30ea906Sjfb8856606 	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
275d30ea906Sjfb8856606 	uint16_t i;
276d30ea906Sjfb8856606 	uint64_t now;
277d30ea906Sjfb8856606 
278d30ea906Sjfb8856606 	rte_atomic32_init(&dsw->credits_on_loan);
279d30ea906Sjfb8856606 
280d30ea906Sjfb8856606 	initial_flow_to_port_assignment(dsw);
281d30ea906Sjfb8856606 
282d30ea906Sjfb8856606 	now = rte_get_timer_cycles();
283d30ea906Sjfb8856606 	for (i = 0; i < dsw->num_ports; i++) {
284d30ea906Sjfb8856606 		dsw->ports[i].measurement_start = now;
285d30ea906Sjfb8856606 		dsw->ports[i].busy_start = now;
286d30ea906Sjfb8856606 	}
287d30ea906Sjfb8856606 
288d30ea906Sjfb8856606 	return 0;
289d30ea906Sjfb8856606 }
290d30ea906Sjfb8856606 
291d30ea906Sjfb8856606 static void
dsw_port_drain_buf(uint8_t dev_id,struct rte_event * buf,uint16_t buf_len,eventdev_stop_flush_t flush,void * flush_arg)292d30ea906Sjfb8856606 dsw_port_drain_buf(uint8_t dev_id, struct rte_event *buf, uint16_t buf_len,
293d30ea906Sjfb8856606 		   eventdev_stop_flush_t flush, void *flush_arg)
294d30ea906Sjfb8856606 {
295d30ea906Sjfb8856606 	uint16_t i;
296d30ea906Sjfb8856606 
297d30ea906Sjfb8856606 	for (i = 0; i < buf_len; i++)
298d30ea906Sjfb8856606 		flush(dev_id, buf[i], flush_arg);
299d30ea906Sjfb8856606 }
300d30ea906Sjfb8856606 
static void
dsw_port_drain_paused(uint8_t dev_id, struct dsw_port *port,
		      eventdev_stop_flush_t flush, void *flush_arg)
{
	/* Flush events parked while their flows were paused (e.g.
	 * during migration).
	 */
	dsw_port_drain_buf(dev_id, port->paused_events, port->paused_events_len,
			   flush, flush_arg);
}
308d30ea906Sjfb8856606 
309d30ea906Sjfb8856606 static void
dsw_port_drain_out(uint8_t dev_id,struct dsw_evdev * dsw,struct dsw_port * port,eventdev_stop_flush_t flush,void * flush_arg)310d30ea906Sjfb8856606 dsw_port_drain_out(uint8_t dev_id, struct dsw_evdev *dsw, struct dsw_port *port,
311d30ea906Sjfb8856606 		   eventdev_stop_flush_t flush, void *flush_arg)
312d30ea906Sjfb8856606 {
313d30ea906Sjfb8856606 	uint16_t dport_id;
314d30ea906Sjfb8856606 
315d30ea906Sjfb8856606 	for (dport_id = 0; dport_id < dsw->num_ports; dport_id++)
316d30ea906Sjfb8856606 		if (dport_id != port->id)
317d30ea906Sjfb8856606 			dsw_port_drain_buf(dev_id, port->out_buffer[dport_id],
318d30ea906Sjfb8856606 					   port->out_buffer_len[dport_id],
319d30ea906Sjfb8856606 					   flush, flush_arg);
320d30ea906Sjfb8856606 }
321d30ea906Sjfb8856606 
322d30ea906Sjfb8856606 static void
dsw_port_drain_in_ring(uint8_t dev_id,struct dsw_port * port,eventdev_stop_flush_t flush,void * flush_arg)323d30ea906Sjfb8856606 dsw_port_drain_in_ring(uint8_t dev_id, struct dsw_port *port,
324d30ea906Sjfb8856606 		       eventdev_stop_flush_t flush, void *flush_arg)
325d30ea906Sjfb8856606 {
326d30ea906Sjfb8856606 	struct rte_event ev;
327d30ea906Sjfb8856606 
328d30ea906Sjfb8856606 	while (rte_event_ring_dequeue_burst(port->in_ring, &ev, 1, NULL))
329d30ea906Sjfb8856606 		flush(dev_id, ev, flush_arg);
330d30ea906Sjfb8856606 }
331d30ea906Sjfb8856606 
332d30ea906Sjfb8856606 static void
dsw_drain(uint8_t dev_id,struct dsw_evdev * dsw,eventdev_stop_flush_t flush,void * flush_arg)333d30ea906Sjfb8856606 dsw_drain(uint8_t dev_id, struct dsw_evdev *dsw,
334d30ea906Sjfb8856606 	  eventdev_stop_flush_t flush, void *flush_arg)
335d30ea906Sjfb8856606 {
336d30ea906Sjfb8856606 	uint16_t port_id;
337d30ea906Sjfb8856606 
338d30ea906Sjfb8856606 	if (flush == NULL)
339d30ea906Sjfb8856606 		return;
340d30ea906Sjfb8856606 
341d30ea906Sjfb8856606 	for (port_id = 0; port_id < dsw->num_ports; port_id++) {
342d30ea906Sjfb8856606 		struct dsw_port *port = &dsw->ports[port_id];
343d30ea906Sjfb8856606 
344d30ea906Sjfb8856606 		dsw_port_drain_out(dev_id, dsw, port, flush, flush_arg);
345d30ea906Sjfb8856606 		dsw_port_drain_paused(dev_id, port, flush, flush_arg);
346d30ea906Sjfb8856606 		dsw_port_drain_in_ring(dev_id, port, flush, flush_arg);
347d30ea906Sjfb8856606 	}
348d30ea906Sjfb8856606 }
349d30ea906Sjfb8856606 
350d30ea906Sjfb8856606 static void
dsw_stop(struct rte_eventdev * dev)351d30ea906Sjfb8856606 dsw_stop(struct rte_eventdev *dev)
352d30ea906Sjfb8856606 {
353d30ea906Sjfb8856606 	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
354d30ea906Sjfb8856606 	uint8_t dev_id;
355d30ea906Sjfb8856606 	eventdev_stop_flush_t flush;
356d30ea906Sjfb8856606 	void *flush_arg;
357d30ea906Sjfb8856606 
358d30ea906Sjfb8856606 	dev_id = dev->data->dev_id;
359d30ea906Sjfb8856606 	flush = dev->dev_ops->dev_stop_flush;
360d30ea906Sjfb8856606 	flush_arg = dev->data->dev_stop_flush_arg;
361d30ea906Sjfb8856606 
362d30ea906Sjfb8856606 	dsw_drain(dev_id, dsw, flush, flush_arg);
363d30ea906Sjfb8856606 }
364d30ea906Sjfb8856606 
365d30ea906Sjfb8856606 static int
dsw_close(struct rte_eventdev * dev)366d30ea906Sjfb8856606 dsw_close(struct rte_eventdev *dev)
367d30ea906Sjfb8856606 {
368d30ea906Sjfb8856606 	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
369d30ea906Sjfb8856606 
370d30ea906Sjfb8856606 	dsw->num_ports = 0;
371d30ea906Sjfb8856606 	dsw->num_queues = 0;
372d30ea906Sjfb8856606 
373d30ea906Sjfb8856606 	return 0;
374d30ea906Sjfb8856606 }
375d30ea906Sjfb8856606 
/* Eventdev control-path operations table. Fast-path entry points
 * (enqueue/dequeue) are installed directly on the device in
 * dsw_probe().
 */
static struct rte_eventdev_ops dsw_evdev_ops = {
	.port_setup = dsw_port_setup,
	.port_def_conf = dsw_port_def_conf,
	.port_release = dsw_port_release,
	.queue_setup = dsw_queue_setup,
	.queue_def_conf = dsw_queue_def_conf,
	.queue_release = dsw_queue_release,
	.port_link = dsw_port_link,
	.port_unlink = dsw_port_unlink,
	.dev_infos_get = dsw_info_get,
	.dev_configure = dsw_configure,
	.dev_start = dsw_start,
	.dev_stop = dsw_stop,
	.dev_close = dsw_close,
	.xstats_get = dsw_xstats_get,
	.xstats_get_names = dsw_xstats_get_names,
	.xstats_get_by_name = dsw_xstats_get_by_name
};
394d30ea906Sjfb8856606 
395d30ea906Sjfb8856606 static int
dsw_probe(struct rte_vdev_device * vdev)396d30ea906Sjfb8856606 dsw_probe(struct rte_vdev_device *vdev)
397d30ea906Sjfb8856606 {
398d30ea906Sjfb8856606 	const char *name;
399d30ea906Sjfb8856606 	struct rte_eventdev *dev;
400d30ea906Sjfb8856606 	struct dsw_evdev *dsw;
401d30ea906Sjfb8856606 
402d30ea906Sjfb8856606 	name = rte_vdev_device_name(vdev);
403d30ea906Sjfb8856606 
404d30ea906Sjfb8856606 	dev = rte_event_pmd_vdev_init(name, sizeof(struct dsw_evdev),
405d30ea906Sjfb8856606 				      rte_socket_id());
406d30ea906Sjfb8856606 	if (dev == NULL)
407d30ea906Sjfb8856606 		return -EFAULT;
408d30ea906Sjfb8856606 
409d30ea906Sjfb8856606 	dev->dev_ops = &dsw_evdev_ops;
410d30ea906Sjfb8856606 	dev->enqueue = dsw_event_enqueue;
411d30ea906Sjfb8856606 	dev->enqueue_burst = dsw_event_enqueue_burst;
412d30ea906Sjfb8856606 	dev->enqueue_new_burst = dsw_event_enqueue_new_burst;
413d30ea906Sjfb8856606 	dev->enqueue_forward_burst = dsw_event_enqueue_forward_burst;
414d30ea906Sjfb8856606 	dev->dequeue = dsw_event_dequeue;
415d30ea906Sjfb8856606 	dev->dequeue_burst = dsw_event_dequeue_burst;
416d30ea906Sjfb8856606 
417d30ea906Sjfb8856606 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
418d30ea906Sjfb8856606 		return 0;
419d30ea906Sjfb8856606 
420d30ea906Sjfb8856606 	dsw = dev->data->dev_private;
421d30ea906Sjfb8856606 	dsw->data = dev->data;
422d30ea906Sjfb8856606 
423d30ea906Sjfb8856606 	return 0;
424d30ea906Sjfb8856606 }
425d30ea906Sjfb8856606 
426d30ea906Sjfb8856606 static int
dsw_remove(struct rte_vdev_device * vdev)427d30ea906Sjfb8856606 dsw_remove(struct rte_vdev_device *vdev)
428d30ea906Sjfb8856606 {
429d30ea906Sjfb8856606 	const char *name;
430d30ea906Sjfb8856606 
431d30ea906Sjfb8856606 	name = rte_vdev_device_name(vdev);
432d30ea906Sjfb8856606 	if (name == NULL)
433d30ea906Sjfb8856606 		return -EINVAL;
434d30ea906Sjfb8856606 
435d30ea906Sjfb8856606 	return rte_event_pmd_vdev_uninit(name);
436d30ea906Sjfb8856606 }
437d30ea906Sjfb8856606 
/* Virtual-device driver descriptor; registered under the name
 * "event_dsw" (EVENTDEV_NAME_DSW_PMD).
 */
static struct rte_vdev_driver evdev_dsw_pmd_drv = {
	.probe = dsw_probe,
	.remove = dsw_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DSW_PMD, evdev_dsw_pmd_drv);
444