1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
24418919fSjohnjiang * Copyright 2017-2019 NXP
3d30ea906Sjfb8856606 */
4d30ea906Sjfb8856606
5d30ea906Sjfb8856606 #include <assert.h>
6d30ea906Sjfb8856606 #include <stdio.h>
7d30ea906Sjfb8856606 #include <stdbool.h>
8d30ea906Sjfb8856606 #include <errno.h>
9d30ea906Sjfb8856606 #include <stdint.h>
10d30ea906Sjfb8856606 #include <string.h>
11d30ea906Sjfb8856606 #include <sys/epoll.h>
12d30ea906Sjfb8856606
13d30ea906Sjfb8856606 #include <rte_atomic.h>
14d30ea906Sjfb8856606 #include <rte_byteorder.h>
15d30ea906Sjfb8856606 #include <rte_common.h>
16d30ea906Sjfb8856606 #include <rte_debug.h>
17d30ea906Sjfb8856606 #include <rte_dev.h>
18d30ea906Sjfb8856606 #include <rte_eal.h>
19d30ea906Sjfb8856606 #include <rte_lcore.h>
20d30ea906Sjfb8856606 #include <rte_log.h>
21d30ea906Sjfb8856606 #include <rte_malloc.h>
22d30ea906Sjfb8856606 #include <rte_memcpy.h>
23d30ea906Sjfb8856606 #include <rte_memory.h>
24d30ea906Sjfb8856606 #include <rte_memzone.h>
25d30ea906Sjfb8856606 #include <rte_pci.h>
26d30ea906Sjfb8856606 #include <rte_eventdev.h>
27d30ea906Sjfb8856606 #include <rte_eventdev_pmd_vdev.h>
28d30ea906Sjfb8856606 #include <rte_ethdev.h>
29d30ea906Sjfb8856606 #include <rte_event_eth_rx_adapter.h>
304418919fSjohnjiang #include <rte_event_eth_tx_adapter.h>
314418919fSjohnjiang #include <rte_cryptodev.h>
32d30ea906Sjfb8856606 #include <rte_dpaa_bus.h>
33d30ea906Sjfb8856606 #include <rte_dpaa_logs.h>
34d30ea906Sjfb8856606 #include <rte_cycles.h>
35d30ea906Sjfb8856606 #include <rte_kvargs.h>
36d30ea906Sjfb8856606
37d30ea906Sjfb8856606 #include <dpaa_ethdev.h>
384418919fSjohnjiang #include <dpaa_sec_event.h>
39d30ea906Sjfb8856606 #include "dpaa_eventdev.h"
40d30ea906Sjfb8856606 #include <dpaa_mempool.h>
41d30ea906Sjfb8856606
42d30ea906Sjfb8856606 /*
43d30ea906Sjfb8856606 * Clarifications
44d30ea906Sjfb8856606 * Evendev = Virtual Instance for SoC
45d30ea906Sjfb8856606 * Eventport = Portal Instance
46d30ea906Sjfb8856606 * Eventqueue = Channel Instance
47d30ea906Sjfb8856606 * 1 Eventdev can have N Eventqueue
48d30ea906Sjfb8856606 */
49*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(dpaa_logtype_eventdev, pmd.event.dpaa, NOTICE);
50d30ea906Sjfb8856606
51d30ea906Sjfb8856606 #define DISABLE_INTR_MODE "disable_intr"
52d30ea906Sjfb8856606
53d30ea906Sjfb8856606 static int
dpaa_event_dequeue_timeout_ticks(struct rte_eventdev * dev,uint64_t ns,uint64_t * timeout_ticks)54d30ea906Sjfb8856606 dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
55d30ea906Sjfb8856606 uint64_t *timeout_ticks)
56d30ea906Sjfb8856606 {
57d30ea906Sjfb8856606 EVENTDEV_INIT_FUNC_TRACE();
58d30ea906Sjfb8856606
59d30ea906Sjfb8856606 RTE_SET_USED(dev);
60d30ea906Sjfb8856606
61d30ea906Sjfb8856606 uint64_t cycles_per_second;
62d30ea906Sjfb8856606
63d30ea906Sjfb8856606 cycles_per_second = rte_get_timer_hz();
64d30ea906Sjfb8856606 *timeout_ticks = (ns * cycles_per_second) / NS_PER_S;
65d30ea906Sjfb8856606
66d30ea906Sjfb8856606 return 0;
67d30ea906Sjfb8856606 }
68d30ea906Sjfb8856606
static int
dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,
				      uint64_t *timeout_ticks)
{
	RTE_SET_USED(dev);

	/* Interrupt mode sleeps in select(); express the timeout in
	 * microseconds rather than timer cycles.
	 */
	*timeout_ticks = ns / 1000;
	return 0;
}
78d30ea906Sjfb8856606
79d30ea906Sjfb8856606 static void
dpaa_eventq_portal_add(u16 ch_id)80d30ea906Sjfb8856606 dpaa_eventq_portal_add(u16 ch_id)
81d30ea906Sjfb8856606 {
82d30ea906Sjfb8856606 uint32_t sdqcr;
83d30ea906Sjfb8856606
84d30ea906Sjfb8856606 sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
85d30ea906Sjfb8856606 qman_static_dequeue_add(sdqcr, NULL);
86d30ea906Sjfb8856606 }
87d30ea906Sjfb8856606
88d30ea906Sjfb8856606 static uint16_t
dpaa_event_enqueue_burst(void * port,const struct rte_event ev[],uint16_t nb_events)89d30ea906Sjfb8856606 dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
90d30ea906Sjfb8856606 uint16_t nb_events)
91d30ea906Sjfb8856606 {
92d30ea906Sjfb8856606 uint16_t i;
93d30ea906Sjfb8856606 struct rte_mbuf *mbuf;
94d30ea906Sjfb8856606
95d30ea906Sjfb8856606 RTE_SET_USED(port);
96d30ea906Sjfb8856606 /*Release all the contexts saved previously*/
97d30ea906Sjfb8856606 for (i = 0; i < nb_events; i++) {
98d30ea906Sjfb8856606 switch (ev[i].op) {
99d30ea906Sjfb8856606 case RTE_EVENT_OP_RELEASE:
100d30ea906Sjfb8856606 qman_dca_index(ev[i].impl_opaque, 0);
101d30ea906Sjfb8856606 mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
102*2d9fd380Sjfb8856606 *dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
103d30ea906Sjfb8856606 DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
104d30ea906Sjfb8856606 DPAA_PER_LCORE_DQRR_SIZE--;
105d30ea906Sjfb8856606 break;
106d30ea906Sjfb8856606 default:
107d30ea906Sjfb8856606 break;
108d30ea906Sjfb8856606 }
109d30ea906Sjfb8856606 }
110d30ea906Sjfb8856606
111d30ea906Sjfb8856606 return nb_events;
112d30ea906Sjfb8856606 }
113d30ea906Sjfb8856606
static uint16_t
dpaa_event_enqueue(void *port, const struct rte_event *ev)
{
	/* Single-event enqueue is just a burst of one. */
	return dpaa_event_enqueue_burst(port, ev, 1);
}
119d30ea906Sjfb8856606
drain_4_bytes(int fd,fd_set * fdset)120d30ea906Sjfb8856606 static void drain_4_bytes(int fd, fd_set *fdset)
121d30ea906Sjfb8856606 {
122d30ea906Sjfb8856606 if (FD_ISSET(fd, fdset)) {
123d30ea906Sjfb8856606 /* drain 4 bytes */
124d30ea906Sjfb8856606 uint32_t junk;
125d30ea906Sjfb8856606 ssize_t sjunk = read(qman_thread_fd(), &junk, sizeof(junk));
126d30ea906Sjfb8856606 if (sjunk != sizeof(junk))
127d30ea906Sjfb8856606 DPAA_EVENTDEV_ERR("UIO irq read error");
128d30ea906Sjfb8856606 }
129d30ea906Sjfb8856606 }
130d30ea906Sjfb8856606
/* Sleep (via select()) on the qman portal fd until a dequeue-ready IRQ
 * fires or the timeout expires.  timeout_ticks is interpreted in
 * microseconds here (tv_sec = ticks / 1e6, tv_usec = ticks % 1e6),
 * matching dpaa_event_dequeue_timeout_ticks_intr().  Returns the
 * select() result: >0 when the fd became readable, 0 on timeout,
 * <0 on error.
 */
static inline int
dpaa_event_dequeue_wait(uint64_t timeout_ticks)
{
	int fd_qman, nfds;
	int ret;
	fd_set readset;

	/* Go into (and back out of) IRQ mode for each select,
	 * it simplifies exit-path considerations and other
	 * potential nastiness.
	 */
	struct timeval tv = {
		.tv_sec = timeout_ticks / 1000000,
		.tv_usec = timeout_ticks % 1000000
	};

	fd_qman = qman_thread_fd();
	nfds = fd_qman + 1;
	FD_ZERO(&readset);
	FD_SET(fd_qman, &readset);

	/* Arm the dequeue-ready interrupt source before sleeping. */
	qman_irqsource_add(QM_PIRQ_DQRI);

	ret = select(nfds, &readset, NULL, NULL, &tv);
	if (ret < 0)
		return ret;
	/* Calling irqsource_remove() prior to thread_irq()
	 * means thread_irq() will not process whatever caused
	 * the interrupts, however it does ensure that, once
	 * thread_irq() re-enables interrupts, they won't fire
	 * again immediately.
	 */
	qman_irqsource_remove(~0);
	/* Consume the 4-byte UIO token if the IRQ actually fired. */
	drain_4_bytes(fd_qman, &readset);
	qman_thread_irq();

	return ret;
}
169d30ea906Sjfb8856606
/* Poll-mode burst dequeue.  Lazily affines the calling thread to a qman
 * portal and links the port's event queues on first use, releases any
 * atomic DQRR contexts still held by this lcore, then busy-polls the
 * portal until frames arrive or the timeout (in timer cycles) elapses.
 * Returns the number of events filled into ev[], or a negative errno if
 * portal initialization fails.
 */
static uint16_t
dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
			 uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			/* Discard the held DQRR entry and forget its mbuf. */
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	/* Per-dequeue timeout wins; otherwise use the port default. */
	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	/* Busy-poll until frames arrive or the deadline passes. */
	wait_time_ticks += rte_get_timer_cycles();
	do {
		/* Lets dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (num_frames)
			break;
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}
233d30ea906Sjfb8856606
static uint16_t
dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	/* Single-event dequeue is a burst of one. */
	return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
}
239d30ea906Sjfb8856606
/* Interrupt-mode burst dequeue.  Same lazy portal affinity and atomic
 * DQRR-context release as dpaa_event_dequeue_burst(), but instead of
 * busy-polling it sleeps in dpaa_event_dequeue_wait() (select() on the
 * portal IRQ fd) and re-polls once woken.  The timeout is in
 * microseconds in this mode.  Returns the number of events filled into
 * ev[], or a negative errno if portal initialization fails.
 */
static uint16_t
dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
			      uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i, irq = 0;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			/* Discard the held DQRR entry and forget its mbuf. */
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	/* Per-dequeue timeout wins; otherwise use the port default. */
	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	do {
		/* Lets dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		/* 'irq' marks that we were just woken by the IRQ path;
		 * clear it after one poll attempt.
		 */
		if (irq)
			irq = 0;
		if (num_frames)
			break;
		if (wait_time_ticks) { /* wait for time */
			if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
				irq = 1;
				continue;
			}
			break; /* no event after waiting */
		}
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}
311d30ea906Sjfb8856606
static uint16_t
dpaa_event_dequeue_intr(void *port,
			struct rte_event *ev,
			uint64_t timeout_ticks)
{
	/* Single-event variant of the interrupt-mode burst dequeue. */
	return dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks);
}
319d30ea906Sjfb8856606
320d30ea906Sjfb8856606 static void
dpaa_event_dev_info_get(struct rte_eventdev * dev,struct rte_event_dev_info * dev_info)321d30ea906Sjfb8856606 dpaa_event_dev_info_get(struct rte_eventdev *dev,
322d30ea906Sjfb8856606 struct rte_event_dev_info *dev_info)
323d30ea906Sjfb8856606 {
324d30ea906Sjfb8856606 EVENTDEV_INIT_FUNC_TRACE();
325d30ea906Sjfb8856606
326d30ea906Sjfb8856606 RTE_SET_USED(dev);
3274418919fSjohnjiang dev_info->driver_name = "event_dpaa1";
328d30ea906Sjfb8856606 dev_info->min_dequeue_timeout_ns =
329d30ea906Sjfb8856606 DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
330d30ea906Sjfb8856606 dev_info->max_dequeue_timeout_ns =
331d30ea906Sjfb8856606 DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
332d30ea906Sjfb8856606 dev_info->dequeue_timeout_ns =
333d30ea906Sjfb8856606 DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
334d30ea906Sjfb8856606 dev_info->max_event_queues =
335d30ea906Sjfb8856606 DPAA_EVENT_MAX_QUEUES;
336d30ea906Sjfb8856606 dev_info->max_event_queue_flows =
337d30ea906Sjfb8856606 DPAA_EVENT_MAX_QUEUE_FLOWS;
338d30ea906Sjfb8856606 dev_info->max_event_queue_priority_levels =
339d30ea906Sjfb8856606 DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
340d30ea906Sjfb8856606 dev_info->max_event_priority_levels =
341d30ea906Sjfb8856606 DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
342d30ea906Sjfb8856606 dev_info->max_event_ports =
343d30ea906Sjfb8856606 DPAA_EVENT_MAX_EVENT_PORT;
344d30ea906Sjfb8856606 dev_info->max_event_port_dequeue_depth =
345d30ea906Sjfb8856606 DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
346d30ea906Sjfb8856606 dev_info->max_event_port_enqueue_depth =
347d30ea906Sjfb8856606 DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
348d30ea906Sjfb8856606 /*
349d30ea906Sjfb8856606 * TODO: Need to find out that how to fetch this info
350d30ea906Sjfb8856606 * from kernel or somewhere else.
351d30ea906Sjfb8856606 */
352d30ea906Sjfb8856606 dev_info->max_num_events =
353d30ea906Sjfb8856606 DPAA_EVENT_MAX_NUM_EVENTS;
354d30ea906Sjfb8856606 dev_info->event_dev_cap =
355d30ea906Sjfb8856606 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
356d30ea906Sjfb8856606 RTE_EVENT_DEV_CAP_BURST_MODE |
357d30ea906Sjfb8856606 RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
358*2d9fd380Sjfb8856606 RTE_EVENT_DEV_CAP_NONSEQ_MODE |
359*2d9fd380Sjfb8856606 RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
360d30ea906Sjfb8856606 }
361d30ea906Sjfb8856606
/* Apply the application's device configuration: cache the limits from
 * 'conf', allocate one qman pool channel per event queue, reset the
 * event-port array, and derive each port's default dequeue timeout.
 * Returns 0 on success, -ENOMEM or a qman error code on failure.
 */
static int
dpaa_event_dev_configure(const struct rte_eventdev *dev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	int ret, i;
	uint32_t *ch_id;

	EVENTDEV_INIT_FUNC_TRACE();
	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_events_limit = conf->nb_events_limit;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Scratch buffer for the channel ids handed out by qman. */
	ch_id = rte_malloc("dpaa-channels",
			  sizeof(uint32_t) * priv->nb_event_queues,
			  RTE_CACHE_LINE_SIZE);
	if (ch_id == NULL) {
		DPAA_EVENTDEV_ERR("Fail to allocate memory for dpaa channels\n");
		return -ENOMEM;
	}
	/* Create requested event queues within the given event device */
	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
	if (ret < 0) {
		DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err =%d\n",
				 priv->nb_event_queues, ret);
		rte_free(ch_id);
		return ret;
	}
	for (i = 0; i < priv->nb_event_queues; i++)
		priv->evq_info[i].ch_id = (u16)ch_id[i];

	/* Lets prepare event ports */
	memset(&priv->ports[0], 0,
	      sizeof(struct dpaa_port) * priv->nb_event_ports);

	/* Check dequeue timeout method is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * Use timeout value as given in dequeue operation.
		 * So invalidating this timeout value.
		 */
		priv->dequeue_timeout_ns = 0;

	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	for (i = 0; i < priv->nb_event_ports; i++) {
		if (priv->intr_mode) {
			priv->ports[i].timeout_us =
				priv->dequeue_timeout_ns/1000;
		} else {
			/* NOTE(review): in poll mode this stores timer
			 * *cycles* in a field named timeout_us (the poll
			 * path compares it against rte_get_timer_cycles());
			 * the name is misleading — confirm before relying
			 * on the unit.
			 */
			uint64_t cycles_per_second;

			cycles_per_second = rte_get_timer_hz();
			priv->ports[i].timeout_us =
				(priv->dequeue_timeout_ns * cycles_per_second)
					/ NS_PER_S;
		}
	}

	/*
	 * TODO: Currently portals are affined with threads. Maximum threads
	 * can be created equals to number of lcore.
	 */
	rte_free(ch_id);
	DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);

	return 0;
}
439d30ea906Sjfb8856606
static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	/* Nothing to do at start time for this PMD. */
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	return 0;
}
448d30ea906Sjfb8856606
static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	/* Nothing to do at stop time for this PMD. */
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
}
455d30ea906Sjfb8856606
static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	/* No per-device resources to tear down here. */
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	return 0;
}
464d30ea906Sjfb8856606
465d30ea906Sjfb8856606 static void
dpaa_event_queue_def_conf(struct rte_eventdev * dev,uint8_t queue_id,struct rte_event_queue_conf * queue_conf)466d30ea906Sjfb8856606 dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
467d30ea906Sjfb8856606 struct rte_event_queue_conf *queue_conf)
468d30ea906Sjfb8856606 {
469d30ea906Sjfb8856606 EVENTDEV_INIT_FUNC_TRACE();
470d30ea906Sjfb8856606
471d30ea906Sjfb8856606 RTE_SET_USED(dev);
472d30ea906Sjfb8856606 RTE_SET_USED(queue_id);
473d30ea906Sjfb8856606
474d30ea906Sjfb8856606 memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
4754418919fSjohnjiang queue_conf->nb_atomic_flows = DPAA_EVENT_QUEUE_ATOMIC_FLOWS;
476d30ea906Sjfb8856606 queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
477d30ea906Sjfb8856606 queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
478d30ea906Sjfb8856606 }
479d30ea906Sjfb8856606
480d30ea906Sjfb8856606 static int
dpaa_event_queue_setup(struct rte_eventdev * dev,uint8_t queue_id,const struct rte_event_queue_conf * queue_conf)481d30ea906Sjfb8856606 dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
482d30ea906Sjfb8856606 const struct rte_event_queue_conf *queue_conf)
483d30ea906Sjfb8856606 {
484d30ea906Sjfb8856606 struct dpaa_eventdev *priv = dev->data->dev_private;
485d30ea906Sjfb8856606 struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
486d30ea906Sjfb8856606
487d30ea906Sjfb8856606 EVENTDEV_INIT_FUNC_TRACE();
488d30ea906Sjfb8856606
489d30ea906Sjfb8856606 switch (queue_conf->schedule_type) {
490d30ea906Sjfb8856606 case RTE_SCHED_TYPE_PARALLEL:
491d30ea906Sjfb8856606 case RTE_SCHED_TYPE_ATOMIC:
492d30ea906Sjfb8856606 break;
493d30ea906Sjfb8856606 case RTE_SCHED_TYPE_ORDERED:
494d30ea906Sjfb8856606 DPAA_EVENTDEV_ERR("Schedule type is not supported.");
495d30ea906Sjfb8856606 return -1;
496d30ea906Sjfb8856606 }
497d30ea906Sjfb8856606 evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
498d30ea906Sjfb8856606 evq_info->event_queue_id = queue_id;
499d30ea906Sjfb8856606
500d30ea906Sjfb8856606 return 0;
501d30ea906Sjfb8856606 }
502d30ea906Sjfb8856606
static void
dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	/* Queues own no dynamically-allocated state; nothing to free. */
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}
511d30ea906Sjfb8856606
512d30ea906Sjfb8856606 static void
dpaa_event_port_default_conf_get(struct rte_eventdev * dev,uint8_t port_id,struct rte_event_port_conf * port_conf)513d30ea906Sjfb8856606 dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
514d30ea906Sjfb8856606 struct rte_event_port_conf *port_conf)
515d30ea906Sjfb8856606 {
516d30ea906Sjfb8856606 EVENTDEV_INIT_FUNC_TRACE();
517d30ea906Sjfb8856606
518d30ea906Sjfb8856606 RTE_SET_USED(dev);
519d30ea906Sjfb8856606 RTE_SET_USED(port_id);
520d30ea906Sjfb8856606
521d30ea906Sjfb8856606 port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
522d30ea906Sjfb8856606 port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
523d30ea906Sjfb8856606 port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
524d30ea906Sjfb8856606 }
525d30ea906Sjfb8856606
526d30ea906Sjfb8856606 static int
dpaa_event_port_setup(struct rte_eventdev * dev,uint8_t port_id,const struct rte_event_port_conf * port_conf)527d30ea906Sjfb8856606 dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
528d30ea906Sjfb8856606 const struct rte_event_port_conf *port_conf)
529d30ea906Sjfb8856606 {
530d30ea906Sjfb8856606 struct dpaa_eventdev *eventdev = dev->data->dev_private;
531d30ea906Sjfb8856606
532d30ea906Sjfb8856606 EVENTDEV_INIT_FUNC_TRACE();
533d30ea906Sjfb8856606
534d30ea906Sjfb8856606 RTE_SET_USED(port_conf);
535d30ea906Sjfb8856606 dev->data->ports[port_id] = &eventdev->ports[port_id];
536d30ea906Sjfb8856606
537d30ea906Sjfb8856606 return 0;
538d30ea906Sjfb8856606 }
539d30ea906Sjfb8856606
static void
dpaa_event_port_release(void *port)
{
	/* Ports live inside the device private area; nothing to free. */
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port);
}
547d30ea906Sjfb8856606
/* Link 'nb_links' event queues to an event port.  First validates that
 * no requested queue is a single-link queue already bound to another
 * port, then records each queue's id and channel on the port and marks
 * the queue as owned by this port.  Returns the number of queues
 * linked, or -EINVAL on a single-link conflict.
 */
static int
dpaa_event_port_link(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], const uint8_t priorities[],
		     uint16_t nb_links)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;
	struct dpaa_eventq *event_queue;
	uint8_t eventq_id;
	int i;

	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	/* First check that input configuration are valid */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		if ((event_queue->event_queue_cfg
			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
			&& (event_queue->event_port)) {
			return -EINVAL;
		}
	}

	/* NOTE(review): evq_info[] is filled from index 0 on every call,
	 * so a second link call on an already-linked port would overwrite
	 * the earlier entries while num_linked_evq still grows below —
	 * confirm callers only link once per port.
	 */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		event_port->evq_info[i].event_queue_id = eventq_id;
		event_port->evq_info[i].ch_id = event_queue->ch_id;
		event_queue->event_port = port;
	}

	event_port->num_linked_evq = event_port->num_linked_evq + i;

	return (int)i;
}
585d30ea906Sjfb8856606
/* Unlink event queues from an event port.  If the port has no linked
 * queues, reports all requested unlinks as done.  Otherwise clears the
 * port-side slot and the queue's back-pointer for each queue id, and
 * shrinks the link count.  Returns the number of queues processed.
 */
static int
dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
		       uint8_t queues[], uint16_t nb_links)
{
	int i;
	uint8_t eventq_id;
	struct dpaa_eventq *event_queue;
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;

	if (!event_port->num_linked_evq)
		return nb_links;

	/* NOTE(review): evq_info[] is indexed here by the queue id, while
	 * dpaa_event_port_link() filled it by link position — these only
	 * agree when queues were linked in id order starting at 0; verify.
	 */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_port->evq_info[eventq_id].event_queue_id = -1;
		event_port->evq_info[eventq_id].ch_id = 0;
		event_queue = &priv->evq_info[eventq_id];
		event_queue->event_port = NULL;
	}

	/* NOTE(review): no check that i <= num_linked_evq; unlinking more
	 * queues than are linked would wrap the counter — confirm the
	 * eventdev layer prevents that.
	 */
	if (event_port->num_linked_evq)
		event_port->num_linked_evq = event_port->num_linked_evq - i;

	return (int)i;
}
612d30ea906Sjfb8856606
613d30ea906Sjfb8856606 static int
dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev * dev,const struct rte_eth_dev * eth_dev,uint32_t * caps)614d30ea906Sjfb8856606 dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
615d30ea906Sjfb8856606 const struct rte_eth_dev *eth_dev,
616d30ea906Sjfb8856606 uint32_t *caps)
617d30ea906Sjfb8856606 {
618d30ea906Sjfb8856606 const char *ethdev_driver = eth_dev->device->driver->name;
619d30ea906Sjfb8856606
620d30ea906Sjfb8856606 EVENTDEV_INIT_FUNC_TRACE();
621d30ea906Sjfb8856606
622d30ea906Sjfb8856606 RTE_SET_USED(dev);
623d30ea906Sjfb8856606
624d30ea906Sjfb8856606 if (!strcmp(ethdev_driver, "net_dpaa"))
625d30ea906Sjfb8856606 *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
626d30ea906Sjfb8856606 else
627d30ea906Sjfb8856606 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
628d30ea906Sjfb8856606
629d30ea906Sjfb8856606 return 0;
630d30ea906Sjfb8856606 }
631d30ea906Sjfb8856606
/* Attach ethernet Rx queue(s) to the event queue's qman channel.
 * rx_queue_id == -1 means "all Rx queues of the port"; on a partial
 * failure in that case, every queue attached so far is detached again
 * before returning the error.  Returns 0 on success or the attach
 * error code.
 */
static int
dpaa_event_eth_rx_adapter_queue_add(
		const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
	int ret, i;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1) {
		/* Attach every Rx queue; unwind on the first failure. */
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
						     queue_conf);
			if (ret) {
				DPAA_EVENTDEV_ERR(
					"Event Queue attach failed:%d\n", ret);
				goto detach_configured_queues;
			}
		}
		return 0;
	}

	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
	return ret;

detach_configured_queues:

	/* Roll back the queues attached before the failure at index i. */
	for (i = (i - 1); i >= 0 ; i--)
		dpaa_eth_eventq_detach(eth_dev, i);

	return ret;
}
672d30ea906Sjfb8856606
673d30ea906Sjfb8856606 static int
dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev * dev,const struct rte_eth_dev * eth_dev,int32_t rx_queue_id)674d30ea906Sjfb8856606 dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
675d30ea906Sjfb8856606 const struct rte_eth_dev *eth_dev,
676d30ea906Sjfb8856606 int32_t rx_queue_id)
677d30ea906Sjfb8856606 {
678d30ea906Sjfb8856606 int ret, i;
679d30ea906Sjfb8856606 struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
680d30ea906Sjfb8856606
681d30ea906Sjfb8856606 EVENTDEV_INIT_FUNC_TRACE();
682d30ea906Sjfb8856606
683d30ea906Sjfb8856606 RTE_SET_USED(dev);
684d30ea906Sjfb8856606 if (rx_queue_id == -1) {
685d30ea906Sjfb8856606 for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
686d30ea906Sjfb8856606 ret = dpaa_eth_eventq_detach(eth_dev, i);
687d30ea906Sjfb8856606 if (ret)
688d30ea906Sjfb8856606 DPAA_EVENTDEV_ERR(
689d30ea906Sjfb8856606 "Event Queue detach failed:%d\n", ret);
690d30ea906Sjfb8856606 }
691d30ea906Sjfb8856606
692d30ea906Sjfb8856606 return 0;
693d30ea906Sjfb8856606 }
694d30ea906Sjfb8856606
695d30ea906Sjfb8856606 ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
696d30ea906Sjfb8856606 if (ret)
697d30ea906Sjfb8856606 DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
698d30ea906Sjfb8856606 return ret;
699d30ea906Sjfb8856606 }
700d30ea906Sjfb8856606
701d30ea906Sjfb8856606 static int
dpaa_event_eth_rx_adapter_start(const struct rte_eventdev * dev,const struct rte_eth_dev * eth_dev)702d30ea906Sjfb8856606 dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
703d30ea906Sjfb8856606 const struct rte_eth_dev *eth_dev)
704d30ea906Sjfb8856606 {
705d30ea906Sjfb8856606 EVENTDEV_INIT_FUNC_TRACE();
706d30ea906Sjfb8856606
707d30ea906Sjfb8856606 RTE_SET_USED(dev);
708d30ea906Sjfb8856606 RTE_SET_USED(eth_dev);
709d30ea906Sjfb8856606
710d30ea906Sjfb8856606 return 0;
711d30ea906Sjfb8856606 }
712d30ea906Sjfb8856606
713d30ea906Sjfb8856606 static int
dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev * dev,const struct rte_eth_dev * eth_dev)714d30ea906Sjfb8856606 dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
715d30ea906Sjfb8856606 const struct rte_eth_dev *eth_dev)
716d30ea906Sjfb8856606 {
717d30ea906Sjfb8856606 EVENTDEV_INIT_FUNC_TRACE();
718d30ea906Sjfb8856606
719d30ea906Sjfb8856606 RTE_SET_USED(dev);
720d30ea906Sjfb8856606 RTE_SET_USED(eth_dev);
721d30ea906Sjfb8856606
722d30ea906Sjfb8856606 return 0;
723d30ea906Sjfb8856606 }
724d30ea906Sjfb8856606
7254418919fSjohnjiang static int
dpaa_eventdev_crypto_caps_get(const struct rte_eventdev * dev,const struct rte_cryptodev * cdev,uint32_t * caps)7264418919fSjohnjiang dpaa_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
7274418919fSjohnjiang const struct rte_cryptodev *cdev,
7284418919fSjohnjiang uint32_t *caps)
7294418919fSjohnjiang {
7304418919fSjohnjiang const char *name = cdev->data->name;
7314418919fSjohnjiang
7324418919fSjohnjiang EVENTDEV_INIT_FUNC_TRACE();
7334418919fSjohnjiang
7344418919fSjohnjiang RTE_SET_USED(dev);
7354418919fSjohnjiang
7364418919fSjohnjiang if (!strncmp(name, "dpaa_sec-", 9))
7374418919fSjohnjiang *caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA_CAP;
7384418919fSjohnjiang else
7394418919fSjohnjiang return -1;
7404418919fSjohnjiang
7414418919fSjohnjiang return 0;
7424418919fSjohnjiang }
7434418919fSjohnjiang
7444418919fSjohnjiang static int
dpaa_eventdev_crypto_queue_add_all(const struct rte_eventdev * dev,const struct rte_cryptodev * cryptodev,const struct rte_event * ev)7454418919fSjohnjiang dpaa_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
7464418919fSjohnjiang const struct rte_cryptodev *cryptodev,
7474418919fSjohnjiang const struct rte_event *ev)
7484418919fSjohnjiang {
7494418919fSjohnjiang struct dpaa_eventdev *priv = dev->data->dev_private;
7504418919fSjohnjiang uint8_t ev_qid = ev->queue_id;
7514418919fSjohnjiang u16 ch_id = priv->evq_info[ev_qid].ch_id;
7524418919fSjohnjiang int i, ret;
7534418919fSjohnjiang
7544418919fSjohnjiang EVENTDEV_INIT_FUNC_TRACE();
7554418919fSjohnjiang
7564418919fSjohnjiang for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
7574418919fSjohnjiang ret = dpaa_sec_eventq_attach(cryptodev, i,
7584418919fSjohnjiang ch_id, ev);
7594418919fSjohnjiang if (ret) {
7604418919fSjohnjiang DPAA_EVENTDEV_ERR("dpaa_sec_eventq_attach failed: ret %d\n",
7614418919fSjohnjiang ret);
7624418919fSjohnjiang goto fail;
7634418919fSjohnjiang }
7644418919fSjohnjiang }
7654418919fSjohnjiang return 0;
7664418919fSjohnjiang fail:
7674418919fSjohnjiang for (i = (i - 1); i >= 0 ; i--)
7684418919fSjohnjiang dpaa_sec_eventq_detach(cryptodev, i);
7694418919fSjohnjiang
7704418919fSjohnjiang return ret;
7714418919fSjohnjiang }
7724418919fSjohnjiang
7734418919fSjohnjiang static int
dpaa_eventdev_crypto_queue_add(const struct rte_eventdev * dev,const struct rte_cryptodev * cryptodev,int32_t rx_queue_id,const struct rte_event * ev)7744418919fSjohnjiang dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
7754418919fSjohnjiang const struct rte_cryptodev *cryptodev,
7764418919fSjohnjiang int32_t rx_queue_id,
7774418919fSjohnjiang const struct rte_event *ev)
7784418919fSjohnjiang {
7794418919fSjohnjiang struct dpaa_eventdev *priv = dev->data->dev_private;
7804418919fSjohnjiang uint8_t ev_qid = ev->queue_id;
7814418919fSjohnjiang u16 ch_id = priv->evq_info[ev_qid].ch_id;
7824418919fSjohnjiang int ret;
7834418919fSjohnjiang
7844418919fSjohnjiang EVENTDEV_INIT_FUNC_TRACE();
7854418919fSjohnjiang
7864418919fSjohnjiang if (rx_queue_id == -1)
7874418919fSjohnjiang return dpaa_eventdev_crypto_queue_add_all(dev,
7884418919fSjohnjiang cryptodev, ev);
7894418919fSjohnjiang
7904418919fSjohnjiang ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
7914418919fSjohnjiang ch_id, ev);
7924418919fSjohnjiang if (ret) {
7934418919fSjohnjiang DPAA_EVENTDEV_ERR(
7944418919fSjohnjiang "dpaa_sec_eventq_attach failed: ret: %d\n", ret);
7954418919fSjohnjiang return ret;
7964418919fSjohnjiang }
7974418919fSjohnjiang return 0;
7984418919fSjohnjiang }
7994418919fSjohnjiang
8004418919fSjohnjiang static int
dpaa_eventdev_crypto_queue_del_all(const struct rte_eventdev * dev,const struct rte_cryptodev * cdev)8014418919fSjohnjiang dpaa_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
8024418919fSjohnjiang const struct rte_cryptodev *cdev)
8034418919fSjohnjiang {
8044418919fSjohnjiang int i, ret;
8054418919fSjohnjiang
8064418919fSjohnjiang EVENTDEV_INIT_FUNC_TRACE();
8074418919fSjohnjiang
8084418919fSjohnjiang RTE_SET_USED(dev);
8094418919fSjohnjiang
8104418919fSjohnjiang for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
8114418919fSjohnjiang ret = dpaa_sec_eventq_detach(cdev, i);
8124418919fSjohnjiang if (ret) {
8134418919fSjohnjiang DPAA_EVENTDEV_ERR(
8144418919fSjohnjiang "dpaa_sec_eventq_detach failed:ret %d\n", ret);
8154418919fSjohnjiang return ret;
8164418919fSjohnjiang }
8174418919fSjohnjiang }
8184418919fSjohnjiang
8194418919fSjohnjiang return 0;
8204418919fSjohnjiang }
8214418919fSjohnjiang
8224418919fSjohnjiang static int
dpaa_eventdev_crypto_queue_del(const struct rte_eventdev * dev,const struct rte_cryptodev * cryptodev,int32_t rx_queue_id)8234418919fSjohnjiang dpaa_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
8244418919fSjohnjiang const struct rte_cryptodev *cryptodev,
8254418919fSjohnjiang int32_t rx_queue_id)
8264418919fSjohnjiang {
8274418919fSjohnjiang int ret;
8284418919fSjohnjiang
8294418919fSjohnjiang EVENTDEV_INIT_FUNC_TRACE();
8304418919fSjohnjiang
8314418919fSjohnjiang if (rx_queue_id == -1)
8324418919fSjohnjiang return dpaa_eventdev_crypto_queue_del_all(dev, cryptodev);
8334418919fSjohnjiang
8344418919fSjohnjiang ret = dpaa_sec_eventq_detach(cryptodev, rx_queue_id);
8354418919fSjohnjiang if (ret) {
8364418919fSjohnjiang DPAA_EVENTDEV_ERR(
8374418919fSjohnjiang "dpaa_sec_eventq_detach failed: ret: %d\n", ret);
8384418919fSjohnjiang return ret;
8394418919fSjohnjiang }
8404418919fSjohnjiang
8414418919fSjohnjiang return 0;
8424418919fSjohnjiang }
8434418919fSjohnjiang
8444418919fSjohnjiang static int
dpaa_eventdev_crypto_start(const struct rte_eventdev * dev,const struct rte_cryptodev * cryptodev)8454418919fSjohnjiang dpaa_eventdev_crypto_start(const struct rte_eventdev *dev,
8464418919fSjohnjiang const struct rte_cryptodev *cryptodev)
8474418919fSjohnjiang {
8484418919fSjohnjiang EVENTDEV_INIT_FUNC_TRACE();
8494418919fSjohnjiang
8504418919fSjohnjiang RTE_SET_USED(dev);
8514418919fSjohnjiang RTE_SET_USED(cryptodev);
8524418919fSjohnjiang
8534418919fSjohnjiang return 0;
8544418919fSjohnjiang }
8554418919fSjohnjiang
8564418919fSjohnjiang static int
dpaa_eventdev_crypto_stop(const struct rte_eventdev * dev,const struct rte_cryptodev * cryptodev)8574418919fSjohnjiang dpaa_eventdev_crypto_stop(const struct rte_eventdev *dev,
8584418919fSjohnjiang const struct rte_cryptodev *cryptodev)
8594418919fSjohnjiang {
8604418919fSjohnjiang EVENTDEV_INIT_FUNC_TRACE();
8614418919fSjohnjiang
8624418919fSjohnjiang RTE_SET_USED(dev);
8634418919fSjohnjiang RTE_SET_USED(cryptodev);
8644418919fSjohnjiang
8654418919fSjohnjiang return 0;
8664418919fSjohnjiang }
8674418919fSjohnjiang
8684418919fSjohnjiang static int
dpaa_eventdev_tx_adapter_create(uint8_t id,const struct rte_eventdev * dev)8694418919fSjohnjiang dpaa_eventdev_tx_adapter_create(uint8_t id,
8704418919fSjohnjiang const struct rte_eventdev *dev)
8714418919fSjohnjiang {
8724418919fSjohnjiang RTE_SET_USED(id);
8734418919fSjohnjiang RTE_SET_USED(dev);
8744418919fSjohnjiang
8754418919fSjohnjiang /* Nothing to do. Simply return. */
8764418919fSjohnjiang return 0;
8774418919fSjohnjiang }
8784418919fSjohnjiang
8794418919fSjohnjiang static int
dpaa_eventdev_tx_adapter_caps(const struct rte_eventdev * dev,const struct rte_eth_dev * eth_dev,uint32_t * caps)8804418919fSjohnjiang dpaa_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
8814418919fSjohnjiang const struct rte_eth_dev *eth_dev,
8824418919fSjohnjiang uint32_t *caps)
8834418919fSjohnjiang {
8844418919fSjohnjiang RTE_SET_USED(dev);
8854418919fSjohnjiang RTE_SET_USED(eth_dev);
8864418919fSjohnjiang
8874418919fSjohnjiang *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
8884418919fSjohnjiang return 0;
8894418919fSjohnjiang }
8904418919fSjohnjiang
8914418919fSjohnjiang static uint16_t
dpaa_eventdev_txa_enqueue_same_dest(void * port,struct rte_event ev[],uint16_t nb_events)8924418919fSjohnjiang dpaa_eventdev_txa_enqueue_same_dest(void *port,
8934418919fSjohnjiang struct rte_event ev[],
8944418919fSjohnjiang uint16_t nb_events)
8954418919fSjohnjiang {
8964418919fSjohnjiang struct rte_mbuf *m[DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
8974418919fSjohnjiang uint8_t qid, i;
8984418919fSjohnjiang
8994418919fSjohnjiang RTE_SET_USED(port);
9004418919fSjohnjiang
9014418919fSjohnjiang m0 = (struct rte_mbuf *)ev[0].mbuf;
9024418919fSjohnjiang qid = rte_event_eth_tx_adapter_txq_get(m0);
9034418919fSjohnjiang
9044418919fSjohnjiang for (i = 0; i < nb_events; i++)
9054418919fSjohnjiang m[i] = (struct rte_mbuf *)ev[i].mbuf;
9064418919fSjohnjiang
9074418919fSjohnjiang return rte_eth_tx_burst(m0->port, qid, m, nb_events);
9084418919fSjohnjiang }
9094418919fSjohnjiang
9104418919fSjohnjiang static uint16_t
dpaa_eventdev_txa_enqueue(void * port,struct rte_event ev[],uint16_t nb_events)9114418919fSjohnjiang dpaa_eventdev_txa_enqueue(void *port,
9124418919fSjohnjiang struct rte_event ev[],
9134418919fSjohnjiang uint16_t nb_events)
9144418919fSjohnjiang {
9154418919fSjohnjiang struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
9164418919fSjohnjiang uint8_t qid, i;
9174418919fSjohnjiang
9184418919fSjohnjiang RTE_SET_USED(port);
9194418919fSjohnjiang
9204418919fSjohnjiang for (i = 0; i < nb_events; i++) {
9214418919fSjohnjiang qid = rte_event_eth_tx_adapter_txq_get(m);
9224418919fSjohnjiang rte_eth_tx_burst(m->port, qid, &m, 1);
9234418919fSjohnjiang }
9244418919fSjohnjiang
9254418919fSjohnjiang return nb_events;
9264418919fSjohnjiang }
9274418919fSjohnjiang
928d30ea906Sjfb8856606 static struct rte_eventdev_ops dpaa_eventdev_ops = {
929d30ea906Sjfb8856606 .dev_infos_get = dpaa_event_dev_info_get,
930d30ea906Sjfb8856606 .dev_configure = dpaa_event_dev_configure,
931d30ea906Sjfb8856606 .dev_start = dpaa_event_dev_start,
932d30ea906Sjfb8856606 .dev_stop = dpaa_event_dev_stop,
933d30ea906Sjfb8856606 .dev_close = dpaa_event_dev_close,
934d30ea906Sjfb8856606 .queue_def_conf = dpaa_event_queue_def_conf,
935d30ea906Sjfb8856606 .queue_setup = dpaa_event_queue_setup,
936d30ea906Sjfb8856606 .queue_release = dpaa_event_queue_release,
937d30ea906Sjfb8856606 .port_def_conf = dpaa_event_port_default_conf_get,
938d30ea906Sjfb8856606 .port_setup = dpaa_event_port_setup,
939d30ea906Sjfb8856606 .port_release = dpaa_event_port_release,
940d30ea906Sjfb8856606 .port_link = dpaa_event_port_link,
941d30ea906Sjfb8856606 .port_unlink = dpaa_event_port_unlink,
942d30ea906Sjfb8856606 .timeout_ticks = dpaa_event_dequeue_timeout_ticks,
943d30ea906Sjfb8856606 .eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
944d30ea906Sjfb8856606 .eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
945d30ea906Sjfb8856606 .eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
946d30ea906Sjfb8856606 .eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
947d30ea906Sjfb8856606 .eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
9484418919fSjohnjiang .eth_tx_adapter_caps_get = dpaa_eventdev_tx_adapter_caps,
9494418919fSjohnjiang .eth_tx_adapter_create = dpaa_eventdev_tx_adapter_create,
9504418919fSjohnjiang .crypto_adapter_caps_get = dpaa_eventdev_crypto_caps_get,
9514418919fSjohnjiang .crypto_adapter_queue_pair_add = dpaa_eventdev_crypto_queue_add,
9524418919fSjohnjiang .crypto_adapter_queue_pair_del = dpaa_eventdev_crypto_queue_del,
9534418919fSjohnjiang .crypto_adapter_start = dpaa_eventdev_crypto_start,
9544418919fSjohnjiang .crypto_adapter_stop = dpaa_eventdev_crypto_stop,
955d30ea906Sjfb8856606 };
956d30ea906Sjfb8856606
flag_check_handler(__rte_unused const char * key,const char * value,__rte_unused void * opaque)957d30ea906Sjfb8856606 static int flag_check_handler(__rte_unused const char *key,
958d30ea906Sjfb8856606 const char *value, __rte_unused void *opaque)
959d30ea906Sjfb8856606 {
960d30ea906Sjfb8856606 if (strcmp(value, "1"))
961d30ea906Sjfb8856606 return -1;
962d30ea906Sjfb8856606
963d30ea906Sjfb8856606 return 0;
964d30ea906Sjfb8856606 }
965d30ea906Sjfb8856606
966d30ea906Sjfb8856606 static int
dpaa_event_check_flags(const char * params)967d30ea906Sjfb8856606 dpaa_event_check_flags(const char *params)
968d30ea906Sjfb8856606 {
969d30ea906Sjfb8856606 struct rte_kvargs *kvlist;
970d30ea906Sjfb8856606
971d30ea906Sjfb8856606 if (params == NULL || params[0] == '\0')
972d30ea906Sjfb8856606 return 0;
973d30ea906Sjfb8856606
974d30ea906Sjfb8856606 kvlist = rte_kvargs_parse(params, NULL);
975d30ea906Sjfb8856606 if (kvlist == NULL)
976d30ea906Sjfb8856606 return 0;
977d30ea906Sjfb8856606
978d30ea906Sjfb8856606 if (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {
979d30ea906Sjfb8856606 rte_kvargs_free(kvlist);
980d30ea906Sjfb8856606 return 0;
981d30ea906Sjfb8856606 }
982d30ea906Sjfb8856606 /* INTR MODE is disabled when there's key-value pair: disable_intr = 1*/
983d30ea906Sjfb8856606 if (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,
984d30ea906Sjfb8856606 flag_check_handler, NULL) < 0) {
985d30ea906Sjfb8856606 rte_kvargs_free(kvlist);
986d30ea906Sjfb8856606 return 0;
987d30ea906Sjfb8856606 }
988d30ea906Sjfb8856606 rte_kvargs_free(kvlist);
989d30ea906Sjfb8856606
990d30ea906Sjfb8856606 return 1;
991d30ea906Sjfb8856606 }
992d30ea906Sjfb8856606
993d30ea906Sjfb8856606 static int
dpaa_event_dev_create(const char * name,const char * params)994d30ea906Sjfb8856606 dpaa_event_dev_create(const char *name, const char *params)
995d30ea906Sjfb8856606 {
996d30ea906Sjfb8856606 struct rte_eventdev *eventdev;
997d30ea906Sjfb8856606 struct dpaa_eventdev *priv;
998d30ea906Sjfb8856606
999d30ea906Sjfb8856606 eventdev = rte_event_pmd_vdev_init(name,
1000d30ea906Sjfb8856606 sizeof(struct dpaa_eventdev),
1001d30ea906Sjfb8856606 rte_socket_id());
1002d30ea906Sjfb8856606 if (eventdev == NULL) {
1003d30ea906Sjfb8856606 DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
1004d30ea906Sjfb8856606 goto fail;
1005d30ea906Sjfb8856606 }
1006d30ea906Sjfb8856606 priv = eventdev->data->dev_private;
1007d30ea906Sjfb8856606
1008d30ea906Sjfb8856606 eventdev->dev_ops = &dpaa_eventdev_ops;
1009d30ea906Sjfb8856606 eventdev->enqueue = dpaa_event_enqueue;
1010d30ea906Sjfb8856606 eventdev->enqueue_burst = dpaa_event_enqueue_burst;
1011d30ea906Sjfb8856606
1012d30ea906Sjfb8856606 if (dpaa_event_check_flags(params)) {
1013d30ea906Sjfb8856606 eventdev->dequeue = dpaa_event_dequeue;
1014d30ea906Sjfb8856606 eventdev->dequeue_burst = dpaa_event_dequeue_burst;
1015d30ea906Sjfb8856606 } else {
1016d30ea906Sjfb8856606 priv->intr_mode = 1;
1017d30ea906Sjfb8856606 eventdev->dev_ops->timeout_ticks =
1018d30ea906Sjfb8856606 dpaa_event_dequeue_timeout_ticks_intr;
1019d30ea906Sjfb8856606 eventdev->dequeue = dpaa_event_dequeue_intr;
1020d30ea906Sjfb8856606 eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
1021d30ea906Sjfb8856606 }
10224418919fSjohnjiang eventdev->txa_enqueue = dpaa_eventdev_txa_enqueue;
10234418919fSjohnjiang eventdev->txa_enqueue_same_dest = dpaa_eventdev_txa_enqueue_same_dest;
10244418919fSjohnjiang
10254418919fSjohnjiang RTE_LOG(INFO, PMD, "%s eventdev added", name);
1026d30ea906Sjfb8856606
1027d30ea906Sjfb8856606 /* For secondary processes, the primary has done all the work */
1028d30ea906Sjfb8856606 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1029d30ea906Sjfb8856606 return 0;
1030d30ea906Sjfb8856606
1031d30ea906Sjfb8856606 priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;
1032d30ea906Sjfb8856606
1033d30ea906Sjfb8856606 return 0;
1034d30ea906Sjfb8856606 fail:
1035d30ea906Sjfb8856606 return -EFAULT;
1036d30ea906Sjfb8856606 }
1037d30ea906Sjfb8856606
1038d30ea906Sjfb8856606 static int
dpaa_event_dev_probe(struct rte_vdev_device * vdev)1039d30ea906Sjfb8856606 dpaa_event_dev_probe(struct rte_vdev_device *vdev)
1040d30ea906Sjfb8856606 {
1041d30ea906Sjfb8856606 const char *name;
1042d30ea906Sjfb8856606 const char *params;
1043d30ea906Sjfb8856606
1044d30ea906Sjfb8856606 name = rte_vdev_device_name(vdev);
1045d30ea906Sjfb8856606 DPAA_EVENTDEV_INFO("Initializing %s", name);
1046d30ea906Sjfb8856606
1047d30ea906Sjfb8856606 params = rte_vdev_device_args(vdev);
1048d30ea906Sjfb8856606
1049d30ea906Sjfb8856606 return dpaa_event_dev_create(name, params);
1050d30ea906Sjfb8856606 }
1051d30ea906Sjfb8856606
1052d30ea906Sjfb8856606 static int
dpaa_event_dev_remove(struct rte_vdev_device * vdev)1053d30ea906Sjfb8856606 dpaa_event_dev_remove(struct rte_vdev_device *vdev)
1054d30ea906Sjfb8856606 {
1055d30ea906Sjfb8856606 const char *name;
1056d30ea906Sjfb8856606
1057d30ea906Sjfb8856606 name = rte_vdev_device_name(vdev);
1058d30ea906Sjfb8856606 DPAA_EVENTDEV_INFO("Closing %s", name);
1059d30ea906Sjfb8856606
1060d30ea906Sjfb8856606 return rte_event_pmd_vdev_uninit(name);
1061d30ea906Sjfb8856606 }
1062d30ea906Sjfb8856606
1063d30ea906Sjfb8856606 static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
1064d30ea906Sjfb8856606 .probe = dpaa_event_dev_probe,
1065d30ea906Sjfb8856606 .remove = dpaa_event_dev_remove
1066d30ea906Sjfb8856606 };
1067d30ea906Sjfb8856606
1068d30ea906Sjfb8856606 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
1069d30ea906Sjfb8856606 RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,
1070d30ea906Sjfb8856606 DISABLE_INTR_MODE "=<int>");
1071