/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017,2019-2021 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <rte_bus_vdev.h>
#include <ethdev_driver.h>
#include <cryptodev_pmd.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include <dpaa2_sec_event.h>
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Clarifications
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */

#define DPAA2_EV_TX_RETRY_COUNT 10000

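/*
 * Enqueue a burst of events towards the DPCI frame queues through the
 * QBMAN software portal. Parallel events target the DPCI parallel Rx
 * queue and atomic events the atomic Rx queue; an atomic event that
 * still holds a DQRR entry is enqueued with the DCA option so hardware
 * consumes the entry. The event itself is carried in a temporarily
 * allocated rte_event copy referenced by the frame descriptor.
 */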
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{

	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_dpio_dev *dpio_dev;
	uint32_t queue_id = ev[0].queue_id;
	struct dpaa2_eventq *evq_info;
	uint32_t fqid, retry_count;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int i, n, ret;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	/* todo - dpaa2_portal shall have dpio_dev - no per thread variable */
	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	evq_info = &dpaa2_portal->evq_info[queue_id];

	while (nb_events) {
		frames_to_send = (nb_events > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
				&& *dpaa2_seqn(event->mbuf)) {
				uint8_t dqrr_index =
					*dpaa2_seqn(event->mbuf) - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * todo - need to align with hw context data
			 * to avoid copy
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
						sizeof(struct rte_event), 0);

			if (!ev_temp) {
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				DPAA2_EVENTDEV_ERR(
					"Unable to allocate event object");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_events -= loop;
					return num_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}
		num_tx += loop;
		nb_events -= loop;
	}

	return num_tx;
err:
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;
		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

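/*
 * Block on the portal's epoll file descriptor until the DQRR interrupt
 * fires or the timeout expires; used when a dequeue finds no entry and
 * a non-zero timeout was requested.
 */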
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);

	epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
			 &epoll_ev, 1, timeout_ticks);
}

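/*
 * Dequeue callback for the DPCI parallel queue: recover the rte_event
 * copy carried in the frame descriptor, free it, and consume the DQRR
 * entry immediately since no atomic context needs to be held.
 */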
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}

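/*
 * Dequeue callback for the DPCI atomic queue: recover the rte_event
 * copy from the frame descriptor and record the DQRR index in the mbuf
 * sequence number so the entry can be released later, either by a DCA
 * enqueue or explicitly on the next dequeue.
 */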
static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);
	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

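/*
 * Dequeue a burst of events from the statically linked DPCON channels.
 * Any DQRR entries still held for atomic events are consumed first,
 * then the DQRR is polled and each entry is dispatched to the per-queue
 * callback (parallel or atomic). If nothing is available and a timeout
 * was given, wait once on the portal interrupt before retrying.
 */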
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0, n;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}

	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DQRR_SIZE) {
		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
			qbman_swp_dqrr_idx_consume(swp, i);
			DPAA2_PER_LCORE_DQRR_SIZE--;
			*dpaa2_seqn(DPAA2_PER_LCORE_DQRR_MBUF(i)) =
				DPAA2_INVALID_MBUF_SEQN;
		}
		i++;
	}
	DPAA2_PER_LCORE_DQRR_HELD = 0;

	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}
		qbman_swp_prefetch_dqrr_next(swp);

		fd = qbman_result_DQ_fd(dq);
		rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_EVENTDEV_ERR("Null Return VQ received");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
err:
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;

		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
	/* we only support dpio up to number of cores */
	if (dev_info->max_event_ports > rte_lcore_count())
		dev_info->max_event_ports = rte_lcore_count();
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE |
		RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
		RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
}

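/*
 * Store the per-device configuration. The dequeue timeout is either
 * invalidated (per-dequeue timeout mode), defaulted, or taken from the
 * supplied configuration.
 */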
static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	EVENTDEV_INIT_FUNC_TRACE();

	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Check dequeue timeout method is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * Use timeout value as given in dequeue operation.
		 * So invalidating this timeout value.
		 */
		priv->dequeue_timeout_ns = 0;

	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
			     dev->data->dev_id);
	return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->nb_atomic_order_sequences =
				DPAA2_EVENT_QUEUE_ORDER_SEQUENCES;
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
	case RTE_SCHED_TYPE_ORDERED:
		break;
	default:
		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold =
		DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	port_conf->event_port_cfg = 0;
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	char event_port_name[32];
	struct dpaa2_port *portal;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	sprintf(event_port_name, "event-port-%d", port_id);
	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
	if (!portal) {
		DPAA2_EVENTDEV_ERR("Memory allocation failure");
		return -ENOMEM;
	}

	memset(portal, 0, sizeof(struct dpaa2_port));
	dev->data->ports[port_id] = portal;
	return 0;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	struct dpaa2_port *portal = port;

	EVENTDEV_INIT_FUNC_TRACE();

	if (portal == NULL)
		return;

	/* TODO: Cleanup is required when ports are in linked state. */
	if (portal->is_port_linked)
		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

	rte_free(portal);
}

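/*
 * Link event queues to a port by copying the device-level queue state
 * into the port's local evq_info[]; the actual DPIO-to-DPCON channel
 * mapping is set up lazily on the first enqueue/dequeue from the port.
 */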
static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	uint16_t i;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(priorities);

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];
		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
			   sizeof(struct dpaa2_eventq));
		dpaa2_portal->evq_info[queues[i]].event_port = port;
		dpaa2_portal->num_linked_evq++;
	}

	return (int)nb_links;
}

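/*
 * Unlink queues from a port: tear down any static dequeue channel that
 * was established for the queue's DPCON, clear the local queue state,
 * and mark the port unlinked once no linked queues remain.
 */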
static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_port *dpaa2_portal = port;
	int i;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queues);

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &dpaa2_portal->evq_info[queues[i]];

		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
			/* todo dpaa2_portal shall have dpio_dev-no per lcore*/
			dpio_dev = DPAA2_PER_LCORE_DPIO;
			swp = DPAA2_PER_LCORE_PORTAL;

			qbman_swp_push_set(swp,
					evq_info->dpcon->channel_index, 0);
			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id);
		}
		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
		if (dpaa2_portal->num_linked_evq)
			dpaa2_portal->num_linked_evq--;
	}

	if (!dpaa2_portal->num_linked_evq)
		dpaa2_portal->is_port_linked = false;

	return (int)nb_unlinks;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1000 * 1000;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	*timeout_ticks = ns / scale;

	return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

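/*
 * Attach every Rx queue of an ethdev to the DPCON backing the target
 * event queue; on failure, detach the queues attached so far.
 */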
static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
					      dpcon, queue_conf);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue attach failed: err(%d)", ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
				      dpcon, queue_conf);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue attach failed: err(%d)", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue detach failed: err(%d)", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue detach failed: err(%d)", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cdev,
			    uint32_t *caps)
{
	const char *name = cdev->data->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strncmp(name, "dpsec-", 6))
		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
	else
		return -1;

	return 0;
}

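/*
 * Attach every queue pair of a DPAA2_SEC cryptodev to the DPCON backing
 * the target event queue; on failure, detach the pairs attached so far.
 */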
static int
dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev);
		if (ret) {
			DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n",
				    ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_sec_eventq_detach(cryptodev, i);

	return ret;
}

static int
dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		int32_t rx_queue_id,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_add_all(dev,
				cryptodev, ev);

	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
				      dpcon, ev);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
			     const struct rte_cryptodev *cdev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_detach(cdev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_detach failed:ret %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
			     const struct rte_cryptodev *cryptodev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);

	ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_detach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_tx_adapter_create(uint8_t id,
				 const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);

	/* Nothing to do. Simply return. */
	return 0;
}

static int
dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev,
			       uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
	return 0;
}

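/*
 * Tx adapter enqueue for events that all target the same destination:
 * the Tx queue is read from the first mbuf and the whole burst is sent
 * through rte_eth_tx_burst() on that queue.
 */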
static uint16_t
dpaa2_eventdev_txa_enqueue_same_dest(void *port,
				     struct rte_event ev[],
				     uint16_t nb_events)
{
	struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
	uint8_t qid, i;

	RTE_SET_USED(port);

	m0 = (struct rte_mbuf *)ev[0].mbuf;
	qid = rte_event_eth_tx_adapter_txq_get(m0);

	for (i = 0; i < nb_events; i++)
		m[i] = (struct rte_mbuf *)ev[i].mbuf;

	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
}

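/*
 * Generic Tx adapter enqueue: resolve the Tx queue of each mbuf
 * individually and hand the burst to the DPAA2 multi-txq ordered
 * transmit routine.
 */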
static uint16_t
dpaa2_eventdev_txa_enqueue(void *port,
			   struct rte_event ev[],
			   uint16_t nb_events)
{
	void *txq[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH];
	struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH];
	uint8_t qid, i;

	RTE_SET_USED(port);

	for (i = 0; i < nb_events; i++) {
		m[i] = (struct rte_mbuf *)ev[i].mbuf;
		qid = rte_event_eth_tx_adapter_txq_get(m[i]);
		txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid];
	}

	dpaa2_dev_tx_multi_txq_ordered(txq, m, nb_events);

	return nb_events;
}

static struct eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get    = dpaa2_eventdev_info_get,
	.dev_configure    = dpaa2_eventdev_configure,
	.dev_start        = dpaa2_eventdev_start,
	.dev_stop         = dpaa2_eventdev_stop,
	.dev_close        = dpaa2_eventdev_close,
	.queue_def_conf   = dpaa2_eventdev_queue_def_conf,
	.queue_setup      = dpaa2_eventdev_queue_setup,
	.queue_release    = dpaa2_eventdev_queue_release,
	.port_def_conf    = dpaa2_eventdev_port_def_conf,
	.port_setup       = dpaa2_eventdev_port_setup,
	.port_release     = dpaa2_eventdev_port_release,
	.port_link        = dpaa2_eventdev_port_link,
	.port_unlink      = dpaa2_eventdev_port_unlink,
	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
	.dump             = dpaa2_eventdev_dump,
	.dev_selftest     = test_eventdev_dpaa2,
	.eth_rx_adapter_caps_get	= dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add	= dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del	= dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start		= dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop		= dpaa2_eventdev_eth_stop,
	.eth_tx_adapter_caps_get	= dpaa2_eventdev_tx_adapter_caps,
	.eth_tx_adapter_create		= dpaa2_eventdev_tx_adapter_create,
	.crypto_adapter_caps_get	= dpaa2_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add	= dpaa2_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del	= dpaa2_eventdev_crypto_queue_del,
	.crypto_adapter_start		= dpaa2_eventdev_crypto_start,
	.crypto_adapter_stop		= dpaa2_eventdev_crypto_stop,
};

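/*
 * Configure both DPCI Rx queues (parallel and atomic) so that received
 * frames are delivered to the paired DPCON, and register the dequeue
 * callbacks used to turn those frames back into events.
 */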
static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Do settings to get the frame on a DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
		  DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI Rx queue setup failed: err(%d)",
				ret);
			return ret;
		}
	}
	return 0;
}

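/*
 * Create the vdev-backed event device: register the fast-path
 * enqueue/dequeue handlers and, in the primary process, pair every
 * available DPCON with a DPCI to form one event queue until either
 * resource runs out.
 */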
static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
		goto fail;
	}

	eventdev->dev_ops       = &dpaa2_eventdev_ops;
	eventdev->enqueue       = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue       = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
	eventdev->txa_enqueue	= dpaa2_eventdev_txa_enqueue;
	eventdev->txa_enqueue_same_dest	= dpaa2_eventdev_txa_enqueue_same_dest;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto done;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				    "DPCI setup failed: err(%d)", ret);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);

done:
	event_dev_probing_finish(eventdev);
	return 0;
fail:
	return -EFAULT;
}

static int
dpaa2_eventdev_destroy(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	int i;

	eventdev = rte_event_pmd_get_named_dev(name);
	if (eventdev == NULL) {
		RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
		return -1;
	}

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	for (i = 0; i < priv->max_event_queues; i++) {
		if (priv->evq_info[i].dpcon)
			rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);

		if (priv->evq_info[i].dpci)
			rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
	}
	priv->max_event_queues = 0;

	RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
	return 0;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Initializing %s", name);
	return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Closing %s", name);

	dpaa2_eventdev_destroy(name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_event, NOTICE);