1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2017,2019 NXP
3  */
4 
5 #include <assert.h>
6 #include <stdio.h>
7 #include <stdbool.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <sys/epoll.h>
12 
13 #include <rte_atomic.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_debug.h>
17 #include <rte_dev.h>
18 #include <rte_eal.h>
19 #include <rte_fslmc.h>
20 #include <rte_lcore.h>
21 #include <rte_log.h>
22 #include <rte_malloc.h>
23 #include <rte_memcpy.h>
24 #include <rte_memory.h>
25 #include <rte_pci.h>
26 #include <rte_bus_vdev.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_cryptodev.h>
29 #include <rte_event_eth_rx_adapter.h>
30 #include <rte_event_eth_tx_adapter.h>
31 
32 #include <fslmc_vfio.h>
33 #include <dpaa2_hw_pvt.h>
34 #include <dpaa2_hw_mempool.h>
35 #include <dpaa2_hw_dpio.h>
36 #include <dpaa2_ethdev.h>
37 #include <dpaa2_sec_event.h>
38 #include "dpaa2_eventdev.h"
39 #include "dpaa2_eventdev_logs.h"
40 #include <portal/dpaa2_hw_pvt.h>
41 #include <mc/fsl_dpci.h>
42 
/* Clarifications
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */
50 
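/*
 * For orientation only: a typical application-side flow that drives this PMD
 * through the generic rte_eventdev API (a sketch, not part of the driver):
 *
 *	rte_event_dev_configure(dev_id, &dev_conf);
 *	rte_event_queue_setup(dev_id, qid, &queue_conf);
 *	rte_event_port_setup(dev_id, port_id, &port_conf);
 *	rte_event_port_link(dev_id, port_id, &qid, NULL, 1);
 *	rte_event_dev_start(dev_id);
 *	...
 *	rte_event_enqueue_burst(dev_id, port_id, events, nb_events);
 *	rte_event_dequeue_burst(dev_id, port_id, events, nb_events, timeout);
 */
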
/* Dynamic logging identifier for the event device */
52 int dpaa2_logtype_event;
53 #define DPAA2_EV_TX_RETRY_COUNT 10000
54 
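/*
 * Enqueue events through the soft event path: each event is copied into a
 * freshly allocated rte_event and the copy's address is carried in the frame
 * descriptor enqueued to the DPCI Rx queue (parallel or atomic, depending on
 * the event's sched_type). The dequeue-side callbacks below free the copy.
 */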
55 static uint16_t
56 dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
57 			     uint16_t nb_events)
58 {
59 
60 	struct dpaa2_port *dpaa2_portal = port;
61 	struct dpaa2_dpio_dev *dpio_dev;
62 	uint32_t queue_id = ev[0].queue_id;
63 	struct dpaa2_eventq *evq_info;
64 	uint32_t fqid, retry_count;
65 	struct qbman_swp *swp;
66 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
67 	uint32_t loop, frames_to_send;
68 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
69 	uint16_t num_tx = 0;
70 	int i, n, ret;
71 	uint8_t channel_index;
72 
73 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
74 		/* Affine current thread context to a qman portal */
75 		ret = dpaa2_affine_qbman_swp();
76 		if (ret < 0) {
77 			DPAA2_EVENTDEV_ERR("Failure in affining portal");
78 			return 0;
79 		}
80 	}
	/* TODO: dpaa2_portal should cache dpio_dev - no per-thread variable */
82 	dpio_dev = DPAA2_PER_LCORE_DPIO;
83 	swp = DPAA2_PER_LCORE_PORTAL;
84 
85 	if (likely(dpaa2_portal->is_port_linked))
86 		goto skip_linking;
87 
88 	/* Create mapping between portal and channel to receive packets */
89 	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
90 		evq_info = &dpaa2_portal->evq_info[i];
91 		if (!evq_info->event_port)
92 			continue;
93 
94 		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
95 						      CMD_PRI_LOW,
96 						      dpio_dev->token,
97 						      evq_info->dpcon->dpcon_id,
98 						      &channel_index);
99 		if (ret < 0) {
100 			DPAA2_EVENTDEV_ERR(
101 				"Static dequeue config failed: err(%d)", ret);
102 			goto err;
103 		}
104 
105 		qbman_swp_push_set(swp, channel_index, 1);
106 		evq_info->dpcon->channel_index = channel_index;
107 	}
108 	dpaa2_portal->is_port_linked = true;
109 
110 skip_linking:
111 	evq_info = &dpaa2_portal->evq_info[queue_id];
112 
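	/* Enqueue in bursts of at most the enqueue command ring (EQCR) size */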
113 	while (nb_events) {
114 		frames_to_send = (nb_events > dpaa2_eqcr_size) ?
115 			dpaa2_eqcr_size : nb_events;
116 
117 		for (loop = 0; loop < frames_to_send; loop++) {
118 			const struct rte_event *event = &ev[num_tx + loop];
119 
120 			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
121 				fqid = evq_info->dpci->rx_queue[
122 					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
123 			else
124 				fqid = evq_info->dpci->rx_queue[
125 					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;
126 
127 			/* Prepare enqueue descriptor */
128 			qbman_eq_desc_clear(&eqdesc[loop]);
129 			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
130 			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
131 			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
132 
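			/*
			 * For an atomic event that still holds a DQRR entry,
			 * request DCA so hardware consumes that entry when
			 * this enqueue completes, and drop it from the
			 * per-lcore held set.
			 */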
133 			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
134 				&& event->mbuf->seqn) {
135 				uint8_t dqrr_index = event->mbuf->seqn - 1;
136 
137 				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
138 						      dqrr_index, 0);
139 				DPAA2_PER_LCORE_DQRR_SIZE--;
140 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
141 			}
142 
143 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
144 
			/*
			 * TODO: align with the hardware context data
			 * to avoid this copy.
			 */
149 			struct rte_event *ev_temp = rte_malloc(NULL,
150 						sizeof(struct rte_event), 0);
151 
152 			if (!ev_temp) {
153 				if (!loop)
154 					return num_tx;
155 				frames_to_send = loop;
156 				DPAA2_EVENTDEV_ERR(
157 					"Unable to allocate event object");
158 				goto send_partial;
159 			}
160 			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
161 			DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
162 			DPAA2_SET_FD_LEN((&fd_arr[loop]),
163 					 sizeof(struct rte_event));
164 		}
165 send_partial:
166 		loop = 0;
167 		retry_count = 0;
168 		while (loop < frames_to_send) {
169 			ret = qbman_swp_enqueue_multiple_desc(swp,
170 					&eqdesc[loop], &fd_arr[loop],
171 					frames_to_send - loop);
172 			if (unlikely(ret < 0)) {
173 				retry_count++;
174 				if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
175 					num_tx += loop;
176 					nb_events -= loop;
					return num_tx;
178 				}
179 			} else {
180 				loop += ret;
181 				retry_count = 0;
182 			}
183 		}
184 		num_tx += loop;
185 		nb_events -= loop;
186 	}
187 
188 	return num_tx;
189 err:
190 	for (n = 0; n < i; n++) {
191 		evq_info = &dpaa2_portal->evq_info[n];
192 		if (!evq_info->event_port)
193 			continue;
194 		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
195 		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
196 						dpio_dev->token,
197 						evq_info->dpcon->dpcon_id);
198 	}
199 	return 0;
200 
201 }
202 
203 static uint16_t
204 dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
205 {
206 	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
207 }
208 
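/*
 * Wait for work on the calling lcore's portal: clear any pending DQRI status
 * and block on the portal's epoll fd for at most timeout_ticks (effectively
 * milliseconds, see dpaa2_eventdev_timeout_ticks()).
 */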
209 static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
210 {
211 	struct epoll_event epoll_ev;
212 
213 	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
214 					 QBMAN_SWP_INTERRUPT_DQRI);
215 
216 	epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
217 			 &epoll_ev, 1, timeout_ticks);
218 }
219 
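/*
 * DPCI Rx callback for the parallel queue: recover the rte_event copy stashed
 * in the FD at enqueue time, hand it to the caller, free the copy and consume
 * the DQRR entry immediately.
 */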
220 static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
221 					    const struct qbman_fd *fd,
222 					    const struct qbman_result *dq,
223 					    struct dpaa2_queue *rxq,
224 					    struct rte_event *ev)
225 {
226 	struct rte_event *ev_temp =
227 		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
228 
229 	RTE_SET_USED(rxq);
230 
231 	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
232 	rte_free(ev_temp);
233 
234 	qbman_swp_dqrr_consume(swp, dq);
235 }
236 
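/*
 * DPCI Rx callback for the atomic queue: as above, but the DQRR entry is left
 * held. Its index is recorded in mbuf->seqn (offset by one so that zero means
 * "none") and in the per-lcore held set, to be released either via DCA on a
 * later enqueue or explicitly in the dequeue path.
 */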
237 static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
238 					  const struct qbman_fd *fd,
239 					  const struct qbman_result *dq,
240 					  struct dpaa2_queue *rxq,
241 					  struct rte_event *ev)
242 {
243 	struct rte_event *ev_temp =
244 		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
245 	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
246 
247 	RTE_SET_USED(swp);
248 	RTE_SET_USED(rxq);
249 
250 	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
251 	rte_free(ev_temp);
252 	ev->mbuf->seqn = dqrr_index + 1;
253 	DPAA2_PER_LCORE_DQRR_SIZE++;
254 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
255 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
256 }
257 
258 static uint16_t
259 dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
260 			     uint16_t nb_events, uint64_t timeout_ticks)
261 {
262 	const struct qbman_result *dq;
263 	struct dpaa2_dpio_dev *dpio_dev = NULL;
264 	struct dpaa2_port *dpaa2_portal = port;
265 	struct dpaa2_eventq *evq_info;
266 	struct qbman_swp *swp;
267 	const struct qbman_fd *fd;
268 	struct dpaa2_queue *rxq;
269 	int num_pkts = 0, ret, i = 0, n;
270 	uint8_t channel_index;
271 
272 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
273 		/* Affine current thread context to a qman portal */
274 		ret = dpaa2_affine_qbman_swp();
275 		if (ret < 0) {
276 			DPAA2_EVENTDEV_ERR("Failure in affining portal");
277 			return 0;
278 		}
279 	}
280 
281 	dpio_dev = DPAA2_PER_LCORE_DPIO;
282 	swp = DPAA2_PER_LCORE_PORTAL;
283 
284 	if (likely(dpaa2_portal->is_port_linked))
285 		goto skip_linking;
286 
287 	/* Create mapping between portal and channel to receive packets */
288 	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
289 		evq_info = &dpaa2_portal->evq_info[i];
290 		if (!evq_info->event_port)
291 			continue;
292 
293 		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
294 						      CMD_PRI_LOW,
295 						      dpio_dev->token,
296 						      evq_info->dpcon->dpcon_id,
297 						      &channel_index);
298 		if (ret < 0) {
299 			DPAA2_EVENTDEV_ERR(
300 				"Static dequeue config failed: err(%d)", ret);
301 			goto err;
302 		}
303 
304 		qbman_swp_push_set(swp, channel_index, 1);
305 		evq_info->dpcon->channel_index = channel_index;
306 	}
307 	dpaa2_portal->is_port_linked = true;
308 
309 skip_linking:
310 	/* Check if there are atomic contexts to be released */
311 	while (DPAA2_PER_LCORE_DQRR_SIZE) {
312 		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
313 			qbman_swp_dqrr_idx_consume(swp, i);
314 			DPAA2_PER_LCORE_DQRR_SIZE--;
315 			DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
316 				DPAA2_INVALID_MBUF_SEQN;
317 		}
318 		i++;
319 	}
320 	DPAA2_PER_LCORE_DQRR_HELD = 0;
321 
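	/*
	 * Pull from the DQRR until nb_events are gathered. If the first poll
	 * finds the ring empty and a timeout was requested, wait once for the
	 * DQRI interrupt and retry.
	 */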
322 	do {
323 		dq = qbman_swp_dqrr_next(swp);
324 		if (!dq) {
325 			if (!num_pkts && timeout_ticks) {
326 				dpaa2_eventdev_dequeue_wait(timeout_ticks);
327 				timeout_ticks = 0;
328 				continue;
329 			}
330 			return num_pkts;
331 		}
332 		qbman_swp_prefetch_dqrr_next(swp);
333 
334 		fd = qbman_result_DQ_fd(dq);
335 		rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
336 		if (rxq) {
337 			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
338 		} else {
339 			qbman_swp_dqrr_consume(swp, dq);
340 			DPAA2_EVENTDEV_ERR("Null Return VQ received");
341 			return 0;
342 		}
343 
344 		num_pkts++;
345 	} while (num_pkts < nb_events);
346 
347 	return num_pkts;
348 err:
349 	for (n = 0; n < i; n++) {
350 		evq_info = &dpaa2_portal->evq_info[n];
351 		if (!evq_info->event_port)
352 			continue;
353 
354 		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
355 		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
356 							dpio_dev->token,
357 						evq_info->dpcon->dpcon_id);
358 	}
359 	return 0;
360 }
361 
362 static uint16_t
363 dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
364 		       uint64_t timeout_ticks)
365 {
366 	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
367 }
368 
369 static void
370 dpaa2_eventdev_info_get(struct rte_eventdev *dev,
371 			struct rte_event_dev_info *dev_info)
372 {
373 	struct dpaa2_eventdev *priv = dev->data->dev_private;
374 
375 	EVENTDEV_INIT_FUNC_TRACE();
376 
377 	RTE_SET_USED(dev);
378 
379 	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
380 	dev_info->min_dequeue_timeout_ns =
381 		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
382 	dev_info->max_dequeue_timeout_ns =
383 		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
384 	dev_info->dequeue_timeout_ns =
385 		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
386 	dev_info->max_event_queues = priv->max_event_queues;
387 	dev_info->max_event_queue_flows =
388 		DPAA2_EVENT_MAX_QUEUE_FLOWS;
389 	dev_info->max_event_queue_priority_levels =
390 		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
391 	dev_info->max_event_priority_levels =
392 		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
393 	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
	/* We only support DPIOs up to the number of cores */
395 	if (dev_info->max_event_ports > rte_lcore_count())
396 		dev_info->max_event_ports = rte_lcore_count();
397 	dev_info->max_event_port_dequeue_depth =
398 		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
399 	dev_info->max_event_port_enqueue_depth =
400 		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
401 	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
402 	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
404 		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
405 		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
406 		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
407 
408 }
409 
410 static int
411 dpaa2_eventdev_configure(const struct rte_eventdev *dev)
412 {
413 	struct dpaa2_eventdev *priv = dev->data->dev_private;
414 	struct rte_event_dev_config *conf = &dev->data->dev_conf;
415 
416 	EVENTDEV_INIT_FUNC_TRACE();
417 
418 	priv->nb_event_queues = conf->nb_event_queues;
419 	priv->nb_event_ports = conf->nb_event_ports;
420 	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
421 	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
422 	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
423 	priv->event_dev_cfg = conf->event_dev_cfg;
424 
	/* Check whether the dequeue timeout is per-dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * The timeout is supplied with each dequeue operation,
		 * so invalidate the global timeout value.
		 */
		priv->dequeue_timeout_ns = 0;
432 
433 	} else if (conf->dequeue_timeout_ns == 0) {
434 		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
435 	} else {
436 		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
437 	}
438 
439 	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
440 			     dev->data->dev_id);
441 	return 0;
442 }
443 
444 static int
445 dpaa2_eventdev_start(struct rte_eventdev *dev)
446 {
447 	EVENTDEV_INIT_FUNC_TRACE();
448 
449 	RTE_SET_USED(dev);
450 
451 	return 0;
452 }
453 
454 static void
455 dpaa2_eventdev_stop(struct rte_eventdev *dev)
456 {
457 	EVENTDEV_INIT_FUNC_TRACE();
458 
459 	RTE_SET_USED(dev);
460 }
461 
462 static int
463 dpaa2_eventdev_close(struct rte_eventdev *dev)
464 {
465 	EVENTDEV_INIT_FUNC_TRACE();
466 
467 	RTE_SET_USED(dev);
468 
469 	return 0;
470 }
471 
472 static void
473 dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
474 			      struct rte_event_queue_conf *queue_conf)
475 {
476 	EVENTDEV_INIT_FUNC_TRACE();
477 
478 	RTE_SET_USED(dev);
479 	RTE_SET_USED(queue_id);
480 
481 	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
482 	queue_conf->nb_atomic_order_sequences =
483 				DPAA2_EVENT_QUEUE_ORDER_SEQUENCES;
484 	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
485 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
486 }
487 
488 static int
489 dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
490 			   const struct rte_event_queue_conf *queue_conf)
491 {
492 	struct dpaa2_eventdev *priv = dev->data->dev_private;
493 	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];
494 
495 	EVENTDEV_INIT_FUNC_TRACE();
496 
497 	switch (queue_conf->schedule_type) {
498 	case RTE_SCHED_TYPE_PARALLEL:
499 	case RTE_SCHED_TYPE_ATOMIC:
500 	case RTE_SCHED_TYPE_ORDERED:
501 		break;
502 	default:
		DPAA2_EVENTDEV_ERR("Schedule type is not supported");
		return -EINVAL;
505 	}
506 	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
507 	evq_info->event_queue_id = queue_id;
508 
509 	return 0;
510 }
511 
512 static void
513 dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
514 {
515 	EVENTDEV_INIT_FUNC_TRACE();
516 
517 	RTE_SET_USED(dev);
518 	RTE_SET_USED(queue_id);
519 }
520 
521 static void
522 dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
523 			     struct rte_event_port_conf *port_conf)
524 {
525 	EVENTDEV_INIT_FUNC_TRACE();
526 
527 	RTE_SET_USED(dev);
528 	RTE_SET_USED(port_id);
529 
530 	port_conf->new_event_threshold =
531 		DPAA2_EVENT_MAX_NUM_EVENTS;
532 	port_conf->dequeue_depth =
533 		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
534 	port_conf->enqueue_depth =
535 		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
536 	port_conf->disable_implicit_release = 0;
537 }
538 
539 static int
540 dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
541 			  const struct rte_event_port_conf *port_conf)
542 {
543 	char event_port_name[32];
544 	struct dpaa2_port *portal;
545 
546 	EVENTDEV_INIT_FUNC_TRACE();
547 
548 	RTE_SET_USED(port_conf);
549 
550 	sprintf(event_port_name, "event-port-%d", port_id);
551 	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
552 	if (!portal) {
553 		DPAA2_EVENTDEV_ERR("Memory allocation failure");
554 		return -ENOMEM;
555 	}
556 
557 	memset(portal, 0, sizeof(struct dpaa2_port));
558 	dev->data->ports[port_id] = portal;
559 	return 0;
560 }
561 
562 static void
563 dpaa2_eventdev_port_release(void *port)
564 {
565 	struct dpaa2_port *portal = port;
566 
567 	EVENTDEV_INIT_FUNC_TRACE();
568 
	if (portal == NULL)
		return;

	/* TODO: Cleanup is required when ports are in linked state. */
	if (portal->is_port_linked)
		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

	rte_free(portal);
577 }
578 
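/*
 * Record the queue-to-port association in the portal's evq_info[]. The actual
 * QBMAN static-dequeue channel mapping is set up lazily on the first
 * enqueue/dequeue (see is_port_linked).
 */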
579 static int
580 dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
581 			 const uint8_t queues[], const uint8_t priorities[],
582 			uint16_t nb_links)
583 {
584 	struct dpaa2_eventdev *priv = dev->data->dev_private;
585 	struct dpaa2_port *dpaa2_portal = port;
586 	struct dpaa2_eventq *evq_info;
587 	uint16_t i;
588 
589 	EVENTDEV_INIT_FUNC_TRACE();
590 
591 	RTE_SET_USED(priorities);
592 
593 	for (i = 0; i < nb_links; i++) {
594 		evq_info = &priv->evq_info[queues[i]];
595 		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
596 			   sizeof(struct dpaa2_eventq));
597 		dpaa2_portal->evq_info[queues[i]].event_port = port;
598 		dpaa2_portal->num_linked_evq++;
599 	}
600 
601 	return (int)nb_links;
602 }
603 
604 static int
605 dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
606 			   uint8_t queues[], uint16_t nb_unlinks)
607 {
608 	struct dpaa2_port *dpaa2_portal = port;
609 	int i;
610 	struct dpaa2_dpio_dev *dpio_dev = NULL;
611 	struct dpaa2_eventq *evq_info;
612 	struct qbman_swp *swp;
613 
614 	EVENTDEV_INIT_FUNC_TRACE();
615 
616 	RTE_SET_USED(dev);
618 
619 	for (i = 0; i < nb_unlinks; i++) {
620 		evq_info = &dpaa2_portal->evq_info[queues[i]];
621 
622 		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
623 			/* todo dpaa2_portal shall have dpio_dev-no per lcore*/
624 			dpio_dev = DPAA2_PER_LCORE_DPIO;
625 			swp = DPAA2_PER_LCORE_PORTAL;
626 
627 			qbman_swp_push_set(swp,
628 					evq_info->dpcon->channel_index, 0);
629 			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
630 						dpio_dev->token,
631 						evq_info->dpcon->dpcon_id);
632 		}
633 		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
634 		if (dpaa2_portal->num_linked_evq)
635 			dpaa2_portal->num_linked_evq--;
636 	}
637 
638 	if (!dpaa2_portal->num_linked_evq)
639 		dpaa2_portal->is_port_linked = false;
640 
641 	return (int)nb_unlinks;
642 }
643 
644 
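/*
 * Convert a dequeue timeout from nanoseconds to ticks; one tick is one
 * millisecond, matching the epoll_wait() timeout used in
 * dpaa2_eventdev_dequeue_wait().
 */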
645 static int
646 dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
647 			     uint64_t *timeout_ticks)
648 {
	uint32_t scale = 1000 * 1000;
650 
651 	EVENTDEV_INIT_FUNC_TRACE();
652 
653 	RTE_SET_USED(dev);
654 	*timeout_ticks = ns / scale;
655 
656 	return 0;
657 }
658 
659 static void
660 dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
661 {
662 	EVENTDEV_INIT_FUNC_TRACE();
663 
664 	RTE_SET_USED(dev);
665 	RTE_SET_USED(f);
666 }
667 
668 static int
669 dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
670 			    const struct rte_eth_dev *eth_dev,
671 			    uint32_t *caps)
672 {
673 	const char *ethdev_driver = eth_dev->device->driver->name;
674 
675 	EVENTDEV_INIT_FUNC_TRACE();
676 
677 	RTE_SET_USED(dev);
678 
679 	if (!strcmp(ethdev_driver, "net_dpaa2"))
680 		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
681 	else
682 		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
683 
684 	return 0;
685 }
686 
687 static int
688 dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
689 		const struct rte_eth_dev *eth_dev,
690 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
691 {
692 	struct dpaa2_eventdev *priv = dev->data->dev_private;
693 	uint8_t ev_qid = queue_conf->ev.queue_id;
694 	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
695 	int i, ret;
696 
697 	EVENTDEV_INIT_FUNC_TRACE();
698 
699 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
700 		ret = dpaa2_eth_eventq_attach(eth_dev, i,
701 					      dpcon, queue_conf);
702 		if (ret) {
703 			DPAA2_EVENTDEV_ERR(
704 				"Event queue attach failed: err(%d)", ret);
705 			goto fail;
706 		}
707 	}
708 	return 0;
709 fail:
710 	for (i = (i - 1); i >= 0 ; i--)
711 		dpaa2_eth_eventq_detach(eth_dev, i);
712 
713 	return ret;
714 }
715 
716 static int
717 dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
718 		const struct rte_eth_dev *eth_dev,
719 		int32_t rx_queue_id,
720 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
721 {
722 	struct dpaa2_eventdev *priv = dev->data->dev_private;
723 	uint8_t ev_qid = queue_conf->ev.queue_id;
724 	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
725 	int ret;
726 
727 	EVENTDEV_INIT_FUNC_TRACE();
728 
729 	if (rx_queue_id == -1)
730 		return dpaa2_eventdev_eth_queue_add_all(dev,
731 				eth_dev, queue_conf);
732 
733 	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
734 				      dpcon, queue_conf);
735 	if (ret) {
736 		DPAA2_EVENTDEV_ERR(
737 			"Event queue attach failed: err(%d)", ret);
738 		return ret;
739 	}
740 	return 0;
741 }
742 
743 static int
744 dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
745 			     const struct rte_eth_dev *eth_dev)
746 {
747 	int i, ret;
748 
749 	EVENTDEV_INIT_FUNC_TRACE();
750 
751 	RTE_SET_USED(dev);
752 
753 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
754 		ret = dpaa2_eth_eventq_detach(eth_dev, i);
755 		if (ret) {
756 			DPAA2_EVENTDEV_ERR(
757 				"Event queue detach failed: err(%d)", ret);
758 			return ret;
759 		}
760 	}
761 
762 	return 0;
763 }
764 
765 static int
766 dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
767 			     const struct rte_eth_dev *eth_dev,
768 			     int32_t rx_queue_id)
769 {
770 	int ret;
771 
772 	EVENTDEV_INIT_FUNC_TRACE();
773 
774 	if (rx_queue_id == -1)
775 		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);
776 
777 	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
778 	if (ret) {
779 		DPAA2_EVENTDEV_ERR(
780 			"Event queue detach failed: err(%d)", ret);
781 		return ret;
782 	}
783 
784 	return 0;
785 }
786 
787 static int
788 dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
789 			 const struct rte_eth_dev *eth_dev)
790 {
791 	EVENTDEV_INIT_FUNC_TRACE();
792 
793 	RTE_SET_USED(dev);
794 	RTE_SET_USED(eth_dev);
795 
796 	return 0;
797 }
798 
799 static int
800 dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
801 			const struct rte_eth_dev *eth_dev)
802 {
803 	EVENTDEV_INIT_FUNC_TRACE();
804 
805 	RTE_SET_USED(dev);
806 	RTE_SET_USED(eth_dev);
807 
808 	return 0;
809 }
810 
811 static int
812 dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
813 			    const struct rte_cryptodev *cdev,
814 			    uint32_t *caps)
815 {
816 	const char *name = cdev->data->name;
817 
818 	EVENTDEV_INIT_FUNC_TRACE();
819 
820 	RTE_SET_USED(dev);
821 
822 	if (!strncmp(name, "dpsec-", 6))
823 		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
824 	else
825 		return -1;
826 
827 	return 0;
828 }
829 
830 static int
831 dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
832 		const struct rte_cryptodev *cryptodev,
833 		const struct rte_event *ev)
834 {
835 	struct dpaa2_eventdev *priv = dev->data->dev_private;
836 	uint8_t ev_qid = ev->queue_id;
837 	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
838 	int i, ret;
839 
840 	EVENTDEV_INIT_FUNC_TRACE();
841 
842 	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
843 		ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev);
844 		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_attach failed: err(%d)", ret);
847 			goto fail;
848 		}
849 	}
850 	return 0;
851 fail:
852 	for (i = (i - 1); i >= 0 ; i--)
853 		dpaa2_sec_eventq_detach(cryptodev, i);
854 
855 	return ret;
856 }
857 
858 static int
859 dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
860 		const struct rte_cryptodev *cryptodev,
861 		int32_t rx_queue_id,
862 		const struct rte_event *ev)
863 {
864 	struct dpaa2_eventdev *priv = dev->data->dev_private;
865 	uint8_t ev_qid = ev->queue_id;
866 	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
867 	int ret;
868 
869 	EVENTDEV_INIT_FUNC_TRACE();
870 
871 	if (rx_queue_id == -1)
872 		return dpaa2_eventdev_crypto_queue_add_all(dev,
873 				cryptodev, ev);
874 
875 	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
876 				      dpcon, ev);
877 	if (ret) {
878 		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_attach failed: err(%d)", ret);
880 		return ret;
881 	}
882 	return 0;
883 }
884 
885 static int
886 dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
887 			     const struct rte_cryptodev *cdev)
888 {
889 	int i, ret;
890 
891 	EVENTDEV_INIT_FUNC_TRACE();
892 
893 	RTE_SET_USED(dev);
894 
895 	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
896 		ret = dpaa2_sec_eventq_detach(cdev, i);
897 		if (ret) {
898 			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_detach failed: err(%d)", ret);
900 			return ret;
901 		}
902 	}
903 
904 	return 0;
905 }
906 
907 static int
908 dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
909 			     const struct rte_cryptodev *cryptodev,
910 			     int32_t rx_queue_id)
911 {
912 	int ret;
913 
914 	EVENTDEV_INIT_FUNC_TRACE();
915 
916 	if (rx_queue_id == -1)
917 		return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);
918 
919 	ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
920 	if (ret) {
921 		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_detach failed: err(%d)", ret);
923 		return ret;
924 	}
925 
926 	return 0;
927 }
928 
929 static int
930 dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
931 			    const struct rte_cryptodev *cryptodev)
932 {
933 	EVENTDEV_INIT_FUNC_TRACE();
934 
935 	RTE_SET_USED(dev);
936 	RTE_SET_USED(cryptodev);
937 
938 	return 0;
939 }
940 
941 static int
942 dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
943 			   const struct rte_cryptodev *cryptodev)
944 {
945 	EVENTDEV_INIT_FUNC_TRACE();
946 
947 	RTE_SET_USED(dev);
948 	RTE_SET_USED(cryptodev);
949 
950 	return 0;
951 }
952 
953 static int
954 dpaa2_eventdev_tx_adapter_create(uint8_t id,
955 				 const struct rte_eventdev *dev)
956 {
957 	RTE_SET_USED(id);
958 	RTE_SET_USED(dev);
959 
960 	/* Nothing to do. Simply return. */
961 	return 0;
962 }
963 
964 static int
965 dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
966 			       const struct rte_eth_dev *eth_dev,
967 			       uint32_t *caps)
968 {
969 	RTE_SET_USED(dev);
970 	RTE_SET_USED(eth_dev);
971 
972 	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
973 	return 0;
974 }
975 
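/*
 * Tx adapter enqueue for events already known to target the same ethdev Tx
 * queue: batch all mbufs into a single rte_eth_tx_burst() using the queue of
 * the first mbuf.
 */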
976 static uint16_t
977 dpaa2_eventdev_txa_enqueue_same_dest(void *port,
978 				     struct rte_event ev[],
979 				     uint16_t nb_events)
980 {
981 	struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
982 	uint8_t qid, i;
983 
984 	RTE_SET_USED(port);
985 
986 	m0 = (struct rte_mbuf *)ev[0].mbuf;
987 	qid = rte_event_eth_tx_adapter_txq_get(m0);
988 
989 	for (i = 0; i < nb_events; i++)
990 		m[i] = (struct rte_mbuf *)ev[i].mbuf;
991 
992 	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
993 }
994 
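/*
 * Generic Tx adapter enqueue: transmit each event's mbuf on its own Tx queue.
 * Note that mbufs not accepted by rte_eth_tx_burst() are not reported back;
 * nb_events is always returned.
 */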
995 static uint16_t
996 dpaa2_eventdev_txa_enqueue(void *port,
997 			   struct rte_event ev[],
998 			   uint16_t nb_events)
999 {
	struct rte_mbuf *m;
	uint8_t qid, i;

	RTE_SET_USED(port);

	for (i = 0; i < nb_events; i++) {
		m = (struct rte_mbuf *)ev[i].mbuf;
		qid = rte_event_eth_tx_adapter_txq_get(m);
		rte_eth_tx_burst(m->port, qid, &m, 1);
	}
1009 
1010 	return nb_events;
1011 }
1012 
1013 static struct rte_eventdev_ops dpaa2_eventdev_ops = {
1014 	.dev_infos_get    = dpaa2_eventdev_info_get,
1015 	.dev_configure    = dpaa2_eventdev_configure,
1016 	.dev_start        = dpaa2_eventdev_start,
1017 	.dev_stop         = dpaa2_eventdev_stop,
1018 	.dev_close        = dpaa2_eventdev_close,
1019 	.queue_def_conf   = dpaa2_eventdev_queue_def_conf,
1020 	.queue_setup      = dpaa2_eventdev_queue_setup,
1021 	.queue_release    = dpaa2_eventdev_queue_release,
1022 	.port_def_conf    = dpaa2_eventdev_port_def_conf,
1023 	.port_setup       = dpaa2_eventdev_port_setup,
1024 	.port_release     = dpaa2_eventdev_port_release,
1025 	.port_link        = dpaa2_eventdev_port_link,
1026 	.port_unlink      = dpaa2_eventdev_port_unlink,
1027 	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
1028 	.dump             = dpaa2_eventdev_dump,
1029 	.dev_selftest     = test_eventdev_dpaa2,
1030 	.eth_rx_adapter_caps_get	= dpaa2_eventdev_eth_caps_get,
1031 	.eth_rx_adapter_queue_add	= dpaa2_eventdev_eth_queue_add,
1032 	.eth_rx_adapter_queue_del	= dpaa2_eventdev_eth_queue_del,
1033 	.eth_rx_adapter_start		= dpaa2_eventdev_eth_start,
1034 	.eth_rx_adapter_stop		= dpaa2_eventdev_eth_stop,
1035 	.eth_tx_adapter_caps_get	= dpaa2_eventdev_tx_adapter_caps,
1036 	.eth_tx_adapter_create		= dpaa2_eventdev_tx_adapter_create,
1037 	.crypto_adapter_caps_get	= dpaa2_eventdev_crypto_caps_get,
1038 	.crypto_adapter_queue_pair_add	= dpaa2_eventdev_crypto_queue_add,
1039 	.crypto_adapter_queue_pair_del	= dpaa2_eventdev_crypto_queue_del,
1040 	.crypto_adapter_start		= dpaa2_eventdev_crypto_start,
1041 	.crypto_adapter_stop		= dpaa2_eventdev_crypto_stop,
1042 };
1043 
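/*
 * Point both DPCI Rx queues (parallel and atomic) at the given DPCON so that
 * soft events land in that channel, install the per-queue dequeue callbacks,
 * and stash each dpaa2_queue in the FQ context so the dequeue path can find
 * its callback.
 */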
1044 static int
1045 dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
1046 			  struct dpaa2_dpcon_dev *dpcon_dev)
1047 {
1048 	struct dpci_rx_queue_cfg rx_queue_cfg;
1049 	int ret, i;
1050 
1051 	/*Do settings to get the frame on a DPCON object*/
1052 	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
1053 		  DPCI_QUEUE_OPT_USER_CTX;
1054 	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
1055 	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
1056 	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;
1057 
1058 	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
1059 		dpaa2_eventdev_process_parallel;
1060 	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
1061 		dpaa2_eventdev_process_atomic;
1062 
1063 	for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
1064 		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
1065 		ret = dpci_set_rx_queue(&dpci_dev->dpci,
1066 					CMD_PRI_LOW,
1067 					dpci_dev->token, i,
1068 					&rx_queue_cfg);
1069 		if (ret) {
1070 			DPAA2_EVENTDEV_ERR(
1071 				"DPCI Rx queue setup failed: err(%d)",
1072 				ret);
1073 			return ret;
1074 		}
1075 	}
1076 	return 0;
1077 }
1078 
1079 static int
1080 dpaa2_eventdev_create(const char *name)
1081 {
1082 	struct rte_eventdev *eventdev;
1083 	struct dpaa2_eventdev *priv;
1084 	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
1085 	struct dpaa2_dpci_dev *dpci_dev = NULL;
1086 	int ret;
1087 
1088 	eventdev = rte_event_pmd_vdev_init(name,
1089 					   sizeof(struct dpaa2_eventdev),
1090 					   rte_socket_id());
1091 	if (eventdev == NULL) {
1092 		DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
1093 		goto fail;
1094 	}
1095 
1096 	eventdev->dev_ops       = &dpaa2_eventdev_ops;
1097 	eventdev->enqueue       = dpaa2_eventdev_enqueue;
1098 	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
1099 	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
1100 	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
1101 	eventdev->dequeue       = dpaa2_eventdev_dequeue;
1102 	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
1103 	eventdev->txa_enqueue	= dpaa2_eventdev_txa_enqueue;
1104 	eventdev->txa_enqueue_same_dest	= dpaa2_eventdev_txa_enqueue_same_dest;
1105 
1106 	/* For secondary processes, the primary has done all the work */
1107 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1108 		return 0;
1109 
1110 	priv = eventdev->data->dev_private;
1111 	priv->max_event_queues = 0;
1112 
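	/*
	 * Pair up the available DPCON and DPCI objects; each pair backs one
	 * event queue, so max_event_queues ends up as the number of complete
	 * pairs.
	 */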
1113 	do {
1114 		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
1115 		if (!dpcon_dev)
1116 			break;
1117 		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;
1118 
1119 		dpci_dev = rte_dpaa2_alloc_dpci_dev();
1120 		if (!dpci_dev) {
1121 			rte_dpaa2_free_dpcon_dev(dpcon_dev);
1122 			break;
1123 		}
1124 		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;
1125 
1126 		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI setup failed: err(%d)", ret);
			rte_dpaa2_free_dpci_dev(dpci_dev);
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			return ret;
		}
1132 		priv->max_event_queues++;
1133 	} while (dpcon_dev && dpci_dev);
1134 
1135 	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);
1136 
1137 	return 0;
1138 fail:
1139 	return -EFAULT;
1140 }
1141 
1142 static int
1143 dpaa2_eventdev_destroy(const char *name)
1144 {
1145 	struct rte_eventdev *eventdev;
1146 	struct dpaa2_eventdev *priv;
1147 	int i;
1148 
1149 	eventdev = rte_event_pmd_get_named_dev(name);
1150 	if (eventdev == NULL) {
1151 		RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
1152 		return -1;
1153 	}
1154 
1155 	/* For secondary processes, the primary has done all the work */
1156 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1157 		return 0;
1158 
1159 	priv = eventdev->data->dev_private;
1160 	for (i = 0; i < priv->max_event_queues; i++) {
1161 		if (priv->evq_info[i].dpcon)
1162 			rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);
1163 
1164 		if (priv->evq_info[i].dpci)
1165 			rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
1166 
1167 	}
1168 	priv->max_event_queues = 0;
1169 
1170 	RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
1171 	return 0;
1172 }
1173 
1174 
1175 static int
1176 dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
1177 {
1178 	const char *name;
1179 
1180 	name = rte_vdev_device_name(vdev);
1181 	DPAA2_EVENTDEV_INFO("Initializing %s", name);
1182 	return dpaa2_eventdev_create(name);
1183 }
1184 
1185 static int
1186 dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
1187 {
1188 	const char *name;
1189 
1190 	name = rte_vdev_device_name(vdev);
1191 	DPAA2_EVENTDEV_INFO("Closing %s", name);
1192 
1193 	dpaa2_eventdev_destroy(name);
1194 
1195 	return rte_event_pmd_vdev_uninit(name);
1196 }
1197 
1198 static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
1199 	.probe = dpaa2_eventdev_probe,
1200 	.remove = dpaa2_eventdev_remove
1201 };
1202 
1203 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
1204 
1205 RTE_INIT(dpaa2_eventdev_init_log)
1206 {
1207 	dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
1208 	if (dpaa2_logtype_event >= 0)
1209 		rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
1210 }
1211