/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_bus_vdev.h>

#include "skeleton_eventdev.h"

#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
/**< Skeleton event device PMD name */

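/*
 * Fast-path stubs: a real driver would move events to or from hardware
 * here. The skeleton processes nothing, so each function reports that
 * zero events were enqueued or dequeued.
 */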
static uint16_t
skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(port);

	return 0;
}

static uint16_t
skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			uint16_t nb_events)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(port);
	RTE_SET_USED(nb_events);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
				uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

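/* Report the device's limits and capability flags; applications size
 * their configuration from these values, so a real driver must advertise
 * only what the hardware can actually deliver.
 */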
static void
skeleton_eventdev_info_get(struct rte_eventdev *dev,
		struct rte_event_dev_info *dev_info)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	dev_info->min_dequeue_timeout_ns = 1;
	dev_info->max_dequeue_timeout_ns = 10000;
	dev_info->dequeue_timeout_ns = 25;
	dev_info->max_event_queues = 64;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 8;
	dev_info->max_event_ports = 32;
	dev_info->max_event_port_dequeue_depth = 16;
	dev_info->max_event_port_enqueue_depth = 16;
	dev_info->max_num_events = (1ULL << 20);
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_BURST_MODE |
					RTE_EVENT_DEV_CAP_EVENT_QOS |
					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
					RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
}

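/* Apply the application-supplied configuration; the skeleton has no
 * hardware to program, so it only logs the request.
 */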
static int
skeleton_eventdev_configure(const struct rte_eventdev *dev)
{
	struct rte_eventdev_data *data = dev->data;
	struct rte_event_dev_config *conf = &data->dev_conf;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(conf);
	RTE_SET_USED(skel);

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

static int
skeleton_eventdev_start(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_stop(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
}

static int
skeleton_eventdev_close(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

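/* Fill in defaults an application can use as a starting point for
 * rte_event_queue_setup(); ALL_TYPES marks the queue as accepting any
 * schedule type.
 */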
static void
skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			      const struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_conf);
	RTE_SET_USED(queue_id);

	return 0;
}

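/* Default port configuration: the depths chosen here must stay within
 * the limits advertised by skeleton_eventdev_info_get().
 */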
static void
skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 32 * 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
	port_conf->event_port_cfg = 0;
}

static void
skeleton_eventdev_port_release(void *port)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	rte_free(sp);
}

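/* Allocate per-port state on the device's NUMA socket and hook it into
 * dev->data->ports[] so the fast-path functions receive it as 'port'.
 */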
static int
skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct skeleton_port *sp;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				port_id);
		skeleton_eventdev_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	sp = rte_zmalloc_socket("eventdev port",
			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (sp == NULL) {
		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
		return -ENOMEM;
	}

	sp->port_id = port_id;

	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);

	dev->data->ports[port_id] = sp;
	return 0;
}

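/* Link/unlink return how many of the requested queues were processed;
 * returning the full count signals that every request succeeded.
 */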
static int
skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
			const uint8_t queues[], const uint8_t priorities[],
			uint16_t nb_links)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);
	RTE_SET_USED(priorities);

	/* Linked all the queues */
	return (int)nb_links;
}

static int
skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
				 uint8_t queues[], uint16_t nb_unlinks)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);

	/* Unlinked all the queues */
	return (int)nb_unlinks;
}

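/* Convert a dequeue timeout given in nanoseconds to device ticks; the
 * skeleton uses a 1:1 scale.
 */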
static int
skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	*timeout_ticks = ns * scale;

	return 0;
}

static void
skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(f);
}

/* Control-path operations exported to the eventdev layer */
static struct eventdev_ops skeleton_eventdev_ops = {
	.dev_infos_get    = skeleton_eventdev_info_get,
	.dev_configure    = skeleton_eventdev_configure,
	.dev_start        = skeleton_eventdev_start,
	.dev_stop         = skeleton_eventdev_stop,
	.dev_close        = skeleton_eventdev_close,
	.queue_def_conf   = skeleton_eventdev_queue_def_conf,
	.queue_setup      = skeleton_eventdev_queue_setup,
	.queue_release    = skeleton_eventdev_queue_release,
	.port_def_conf    = skeleton_eventdev_port_def_conf,
	.port_setup       = skeleton_eventdev_port_setup,
	.port_release     = skeleton_eventdev_port_release,
	.port_link        = skeleton_eventdev_port_link,
	.port_unlink      = skeleton_eventdev_port_unlink,
	.timeout_ticks    = skeleton_eventdev_timeout_ticks,
	.dump             = skeleton_eventdev_dump
};

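/* Init callback used by the PCI probe path: wires up the control- and
 * fast-path entry points, then maps the BAR0 registers in the primary
 * process. Secondary processes reuse the primary's mapping.
 */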
static int
skeleton_eventdev_init(struct rte_eventdev *eventdev)
{
	struct rte_pci_device *pci_dev;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
	int ret = 0;

	PMD_DRV_FUNC_TRACE();

	eventdev->dev_ops       = &skeleton_eventdev_ops;
	eventdev->enqueue       = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue       = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!skel->reg_base) {
		PMD_DRV_ERR("Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	skel->device_id = pci_dev->id.device_id;
	skel->vendor_id = pci_dev->id.vendor_id;
	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
		eventdev->data->dev_id, eventdev->data->socket_id,
		skel->vendor_id, skel->device_id);

fail:
	return ret;
}

/* PCI based event device */

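/* 0x177d is Cavium's PCI vendor ID; the device ID below is the value the
 * skeleton claims for demonstration purposes.
 */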
#define EVENTDEV_SKEL_VENDOR_ID         0x177d
#define EVENTDEV_SKEL_PRODUCT_ID        0x0001

static const struct rte_pci_id pci_id_skeleton_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
			       EVENTDEV_SKEL_PRODUCT_ID)
	},
	{
		.vendor_id = 0,
	},
};

static int
event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
			 struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
		sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
}

static int
event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}

static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
	.id_table = pci_id_skeleton_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_skeleton_pci_probe,
	.remove = event_skeleton_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);

/* VDEV based event device */

static int
skeleton_eventdev_create(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;

	eventdev = rte_event_pmd_vdev_init(name,
			sizeof(struct skeleton_eventdev), socket_id);
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops       = &skeleton_eventdev_ops;
	eventdev->enqueue       = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue       = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	event_dev_probing_finish(eventdev);
	return 0;
fail:
	return -EFAULT;
}

static int
skeleton_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
			rte_socket_id());
	return skeleton_eventdev_create(name, rte_socket_id());
}

static int
skeleton_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
	.probe = skeleton_eventdev_probe,
	.remove = skeleton_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
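
/*
 * Usage note: the vdev form of this PMD is typically instantiated from the
 * EAL command line, e.g. --vdev=event_skeleton0; the instance name must
 * begin with the driver name registered above.
 */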