/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of NXP nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include "dpaa2_eventdev.h"
#include <mc/fsl_dpci.h>

/* Clarifications
 * Eventdev = SoC instance
 * Eventport = DPIO instance
 * Eventqueue = DPCON instance
 * One eventdev can have N eventqueues
 * A soft event flow is a DPCI instance
 */
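/*
 * Illustrative only, not part of the driver: a minimal sketch of how an
 * application might drive this PMD through the generic eventdev API once
 * the "event_dpaa2" vdev has been probed. dev_id, port_id, m and
 * timeout_ticks are assumed to come from the application's own setup.
 *
 *	struct rte_event ev = {
 *		.queue_id = 0,
 *		.op = RTE_EVENT_OP_NEW,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.mbuf = m,
 *	};
 *	struct rte_event deq[8];
 *	uint16_t n;
 *
 *	rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *	n = rte_event_dequeue_burst(dev_id, port_id, deq,
 *				    RTE_DIM(deq), timeout_ticks);
 */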

/*
 * Enqueue a burst of events. Each event is copied into a freshly allocated
 * shadow buffer whose address is carried in a frame descriptor, and the FD
 * is enqueued to the DPCI frame queue (atomic or parallel, depending on the
 * event's sched_type) feeding the destination DPCON. Events carrying a DQRR
 * index in impl_opaque are consumed via DCA as part of the enqueue.
 */
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct rte_eventdev *ev_dev =
			((struct dpaa2_io_portal_t *)port)->eventdev;
	struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
	uint32_t queue_id = ev[0].queue_id;
	struct evq_info_t *evq_info = &priv->evq_info[queue_id];
	uint32_t fqid;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int ret;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			PMD_DRV_LOG(ERR, "Failure in affining portal\n");
			return 0;
		}
	}

	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_events) {
		/* Send in batches of up to MAX_TX_RING_SLOTS (8) frames */
		frames_to_send = (nb_events >> 3) ?
			MAX_TX_RING_SLOTS : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

			if (event->impl_opaque) {
				uint8_t dqrr_index = event->impl_opaque - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DPIO->dqrr_size--;
				DPAA2_PER_LCORE_DPIO->dqrr_held &=
					~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * todo - need to align with hw context data
			 * to avoid copy
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
				sizeof(struct rte_event), 0);

			if (!ev_temp) {
				PMD_DRV_LOG(ERR, "Unable to allocate memory");
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
		}
		num_tx += frames_to_send;
		nb_events -= frames_to_send;
	}

	return num_tx;
}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

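/*
 * Wait for a dequeue response interrupt (DQRI) on this lcore's DPIO portal,
 * blocking on the portal's epoll fd for at most timeout_ticks. Spurious
 * EINTR wakeups are retried.
 */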
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;
	int ret, i = 0;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);

RETRY:
	ret = epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
			 &epoll_ev, 1, timeout_ticks);
	if (ret < 1) {
		/* epoll_wait can be interrupted by a spurious signal and
		 * fail with errno EINTR; retry in that case so the wait
		 * is not cut short.
		 */
		if (errno == EINTR) {
			PMD_DRV_LOG(DEBUG, "epoll_wait fails\n");
			if (i++ > 10)
				PMD_DRV_LOG(DEBUG, "Dequeue burst Failed\n");
			goto RETRY;
		}
	}
}

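/*
 * Per-queue callback for events arriving on the DPCI parallel queue: copy
 * the shadow rte_event back out of the frame, free the shadow buffer and
 * consume the DQRR entry immediately.
 */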
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}

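/*
 * Per-queue callback for events arriving on the DPCI atomic queue: the DQRR
 * entry is held (its index is stashed in ev->impl_opaque) so that it can be
 * consumed via DCA when the application enqueues or releases the event.
 */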
static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);
	ev->impl_opaque = dqrr_index + 1;
	DPAA2_PER_LCORE_DPIO->dqrr_size++;
	DPAA2_PER_LCORE_DPIO->dqrr_held |= 1 << dqrr_index;
}

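/*
 * Dequeue a burst of events. Any DQRR entries still held from a previous
 * atomic dequeue are consumed first, then the portal is polled; if nothing
 * is available and a timeout was given, wait once for a DQRI interrupt
 * before polling again.
 */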
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0;

	RTE_SET_USED(port);

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			PMD_DRV_LOG(ERR, "Failure in affining portal\n");
			return 0;
		}
	}

	swp = DPAA2_PER_LCORE_PORTAL;

	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DPIO->dqrr_size) {
		if (DPAA2_PER_LCORE_DPIO->dqrr_held & (1 << i)) {
			dq = qbman_get_dqrr_from_idx(swp, i);
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_PER_LCORE_DPIO->dqrr_size--;
		}
		i++;
	}
	DPAA2_PER_LCORE_DPIO->dqrr_held = 0;

	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}

		fd = qbman_result_DQ_fd(dq);

		rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			PMD_DRV_LOG(ERR, "Null Return VQ received\n");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	PMD_DRV_FUNC_TRACE();

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = RTE_MAX_LCORE;
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	PMD_DRV_FUNC_TRACE();

	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	/* schedule_type holds a single scheduling type, not a bitmask;
	 * default to atomic (parallel is selected per event at enqueue).
	 */
	queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct evq_info_t *evq_info = &priv->evq_info[queue_id];

	PMD_DRV_FUNC_TRACE();

	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;

	return 0;
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold =
		DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(port);
}

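/*
 * Bind a DPIO software portal to the event port. The portal structure
 * doubles as the port object handed back to the fast-path enqueue and
 * dequeue routines.
 */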
static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	if (!dpaa2_io_portal[port_id].dpio_dev) {
		dpaa2_io_portal[port_id].dpio_dev =
				dpaa2_get_qbman_swp(port_id);
		/* Check for NULL before taking the reference */
		if (!dpaa2_io_portal[port_id].dpio_dev)
			return -1;
		rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
	}

	dpaa2_io_portal[port_id].eventdev = dev;
	dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
	return 0;
}

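/*
 * Unlink event queues from a port by disabling the static dequeue channel
 * that maps each queue's DPCON onto the port's DPIO portal.
 */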
static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_io_portal_t *dpaa2_portal = port;
	struct evq_info_t *evq_info;
	int i;

	PMD_DRV_FUNC_TRACE();

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &priv->evq_info[queues[i]];
		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(
			dpaa2_portal->dpio_dev->dpio, CMD_PRI_LOW,
			dpaa2_portal->dpio_dev->token,
			evq_info->dpcon->dpcon_id);
	}

	return (int)nb_unlinks;
}

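/*
 * Link event queues to a port: each queue's DPCON is added as a static
 * dequeue channel on the port's DPIO, and push dequeue is enabled for it.
 * Queue priorities are currently ignored.
 */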
static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_io_portal_t *dpaa2_portal = port;
	struct evq_info_t *evq_info;
	uint8_t channel_index;
	int ret, i, n;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(priorities);

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];

		ret = dpio_add_static_dequeue_channel(
			dpaa2_portal->dpio_dev->dpio,
			CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
			evq_info->dpcon->dpcon_id, &channel_index);
		if (ret < 0) {
			PMD_DRV_ERR("Static dequeue cfg failed with ret: %d\n",
				    ret);
			goto err;
		}

		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}

	return (int)nb_links;
err:
	for (n = 0; n < i; n++) {
		evq_info = &priv->evq_info[queues[n]];
		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(
			dpaa2_portal->dpio_dev->dpio, CMD_PRI_LOW,
			dpaa2_portal->dpio_dev->token,
			evq_info->dpcon->dpcon_id);
	}
	return ret;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	*timeout_ticks = ns * scale;

	return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

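/*
 * Attach every Rx queue of the ethdev to the DPCON backing the event queue
 * named in the adapter configuration, detaching again on failure.
 */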
static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int i, ret;

	PMD_DRV_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
				dpcon_id, queue_conf);
		if (ret) {
			PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
				    ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int ret;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
			dpcon_id, queue_conf);
	if (ret) {
		PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
				 const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
				    ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get    = dpaa2_eventdev_info_get,
	.dev_configure    = dpaa2_eventdev_configure,
	.dev_start        = dpaa2_eventdev_start,
	.dev_stop         = dpaa2_eventdev_stop,
	.dev_close        = dpaa2_eventdev_close,
	.queue_def_conf   = dpaa2_eventdev_queue_def_conf,
	.queue_setup      = dpaa2_eventdev_queue_setup,
	.queue_release    = dpaa2_eventdev_queue_release,
	.port_def_conf    = dpaa2_eventdev_port_def_conf,
	.port_setup       = dpaa2_eventdev_port_setup,
	.port_release     = dpaa2_eventdev_port_release,
	.port_link        = dpaa2_eventdev_port_link,
	.port_unlink      = dpaa2_eventdev_port_unlink,
	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
	.dump             = dpaa2_eventdev_dump,
	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
};

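/*
 * Route both Rx queues of the DPCI instance to the given DPCON and register
 * the per-queue callbacks (parallel and atomic) that unpack events on the
 * dequeue path.
 */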
static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Configure the DPCI Rx queues to deliver frames to the DPCON */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
		  DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "set_rx_q failed with err code: %d", ret);
			return ret;
		}
	}
	return 0;
}

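/*
 * Create the eventdev vdev and, in the primary process, pair up as many
 * DPCON/DPCI instances as the platform provides; each pair becomes one
 * event queue.
 */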
static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops       = &dpaa2_eventdev_ops;
	eventdev->enqueue       = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue       = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "dpci setup failed with err code: %d", ret);
			/* Release the pair acquired for this queue */
			rte_dpaa2_free_dpci_dev(dpci_dev);
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Initializing %s", name);
	return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);