1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright (C) Cavium, Inc. 2016.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Cavium, Inc nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <assert.h>
34 #include <stdio.h>
35 #include <stdbool.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 
40 #include <rte_byteorder.h>
41 #include <rte_common.h>
42 #include <rte_debug.h>
43 #include <rte_dev.h>
44 #include <rte_eal.h>
45 #include <rte_log.h>
46 #include <rte_malloc.h>
47 #include <rte_memory.h>
48 #include <rte_lcore.h>
49 #include <rte_bus_vdev.h>
50 
51 #include "skeleton_eventdev.h"
52 
53 #define EVENTDEV_NAME_SKELETON_PMD event_skeleton
54 /**< Skeleton event device PMD name */
55 
/**
 * Stub single-event enqueue handler.
 *
 * @param port  Event port object (unused by the skeleton).
 * @param ev    Event to enqueue (unused).
 *
 * @return Number of events enqueued; always 0 for the skeleton.
 */
static uint16_t
skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	/* The original marked both 'port' and a local alias of it as
	 * unused; the redundant local has been dropped.
	 */
	RTE_SET_USED(port);
	RTE_SET_USED(ev);

	return 0;
}
67 
68 static uint16_t
69 skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
70 			uint16_t nb_events)
71 {
72 	struct skeleton_port *sp = port;
73 
74 	RTE_SET_USED(sp);
75 	RTE_SET_USED(ev);
76 	RTE_SET_USED(port);
77 	RTE_SET_USED(nb_events);
78 
79 	return 0;
80 }
81 
/* Stub single-event dequeue: the skeleton never produces events. */
static uint16_t
skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
				uint64_t timeout_ticks)
{
	RTE_SET_USED(port);
	RTE_SET_USED(ev);
	RTE_SET_USED(timeout_ticks);

	return 0;
}
94 
95 static uint16_t
96 skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
97 		uint16_t nb_events, uint64_t timeout_ticks)
98 {
99 	struct skeleton_port *sp = port;
100 
101 	RTE_SET_USED(sp);
102 	RTE_SET_USED(ev);
103 	RTE_SET_USED(nb_events);
104 	RTE_SET_USED(timeout_ticks);
105 
106 	return 0;
107 }
108 
/*
 * Report the skeleton device's capabilities and limits.
 *
 * Fills @dev_info with fixed values; a real PMD would derive these
 * from the underlying hardware.
 */
static void
skeleton_eventdev_info_get(struct rte_eventdev *dev,
		struct rte_event_dev_info *dev_info)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	/* Dequeue timeout range and default, in nanoseconds. */
	dev_info->min_dequeue_timeout_ns = 1;
	dev_info->max_dequeue_timeout_ns = 10000;
	dev_info->dequeue_timeout_ns = 25;
	dev_info->max_event_queues = 64;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 8;
	dev_info->max_event_ports = 32;
	dev_info->max_event_port_dequeue_depth = 16;
	dev_info->max_event_port_enqueue_depth = 16;
	dev_info->max_num_events = (1ULL << 20);
	/* Advertise per-queue QoS, burst mode and per-event QoS support. */
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_BURST_MODE |
					RTE_EVENT_DEV_CAP_EVENT_QOS;
}
134 
/*
 * Configure the event device.
 *
 * The skeleton accepts any configuration; the supplied dev_conf is
 * only fetched, not validated.
 *
 * @return 0 on success (always succeeds).
 */
static int
skeleton_eventdev_configure(const struct rte_eventdev *dev)
{
	struct rte_eventdev_data *data = dev->data;
	struct rte_event_dev_config *conf = &data->dev_conf;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(conf);
	RTE_SET_USED(skel);

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}
150 
/* Start the device; nothing to enable on the skeleton. */
static int
skeleton_eventdev_start(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skeleton_pmd_priv(dev));

	return 0;
}
162 
/* Stop the device; nothing to disable on the skeleton. */
static void
skeleton_eventdev_stop(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skeleton_pmd_priv(dev));
}
172 
/* Close the device; the skeleton holds no resources to release here. */
static int
skeleton_eventdev_close(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skeleton_pmd_priv(dev));

	return 0;
}
184 
/*
 * Provide the default configuration for an event queue.
 *
 * Same defaults for every queue_id: maximal flow/ordering capacity,
 * all schedule types allowed, normal priority.
 */
static void
skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
201 
/* Release a queue; queues own no per-queue state in the skeleton. */
static void
skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(queue_id);
	RTE_SET_USED(dev);
}
210 
/* Set up a queue; any configuration is accepted, no state is kept. */
static int
skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			      const struct rte_event_queue_conf *queue_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(queue_id);
	RTE_SET_USED(queue_conf);
	RTE_SET_USED(skeleton_pmd_priv(dev));

	return 0;
}
225 
/*
 * Provide the default configuration for an event port.
 *
 * Same defaults for every port_id; depths match the maxima advertised
 * by skeleton_eventdev_info_get().
 */
static void
skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 32 * 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
}
241 
/* Free an event port object allocated by skeleton_eventdev_port_setup(). */
static void
skeleton_eventdev_port_release(void *port)
{
	PMD_DRV_FUNC_TRACE();

	/* rte_free() is a no-op on NULL, so no guard is needed. */
	rte_free(port);
}
250 
251 static int
252 skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
253 				const struct rte_event_port_conf *port_conf)
254 {
255 	struct skeleton_port *sp;
256 	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
257 
258 	PMD_DRV_FUNC_TRACE();
259 
260 	RTE_SET_USED(skel);
261 	RTE_SET_USED(port_conf);
262 
263 	/* Free memory prior to re-allocation if needed */
264 	if (dev->data->ports[port_id] != NULL) {
265 		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
266 				port_id);
267 		skeleton_eventdev_port_release(dev->data->ports[port_id]);
268 		dev->data->ports[port_id] = NULL;
269 	}
270 
271 	/* Allocate event port memory */
272 	sp = rte_zmalloc_socket("eventdev port",
273 			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
274 			dev->data->socket_id);
275 	if (sp == NULL) {
276 		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
277 		return -ENOMEM;
278 	}
279 
280 	sp->port_id = port_id;
281 
282 	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);
283 
284 	dev->data->ports[port_id] = sp;
285 	return 0;
286 }
287 
/*
 * Link queues to an event port.
 *
 * The skeleton keeps no link state and reports every requested link
 * as established.
 */
static int
skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
			const uint8_t queues[], const uint8_t priorities[],
			uint16_t nb_links)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port);
	RTE_SET_USED(queues);
	RTE_SET_USED(priorities);

	return (int)nb_links;
}
304 
/*
 * Unlink queues from an event port.
 *
 * The skeleton keeps no link state and reports every requested unlink
 * as done.
 */
static int
skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
				 uint8_t queues[], uint16_t nb_unlinks)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port);
	RTE_SET_USED(queues);

	return (int)nb_unlinks;
}
320 
/*
 * Convert a dequeue timeout from nanoseconds to device ticks.
 *
 * The skeleton uses a 1:1 ns-to-tick conversion.
 *
 * @return 0 on success (always succeeds).
 */
static int
skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	const uint32_t ticks_per_ns = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skeleton_pmd_priv(dev));
	*timeout_ticks = ns * ticks_per_ns;

	return 0;
}
335 
/* Dump internal state to @f; the skeleton has nothing to report. */
static void
skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(f);
	RTE_SET_USED(skeleton_pmd_priv(dev));
}
346 
347 
348 /* Initialize and register event driver with DPDK Application */
349 static const struct rte_eventdev_ops skeleton_eventdev_ops = {
350 	.dev_infos_get    = skeleton_eventdev_info_get,
351 	.dev_configure    = skeleton_eventdev_configure,
352 	.dev_start        = skeleton_eventdev_start,
353 	.dev_stop         = skeleton_eventdev_stop,
354 	.dev_close        = skeleton_eventdev_close,
355 	.queue_def_conf   = skeleton_eventdev_queue_def_conf,
356 	.queue_setup      = skeleton_eventdev_queue_setup,
357 	.queue_release    = skeleton_eventdev_queue_release,
358 	.port_def_conf    = skeleton_eventdev_port_def_conf,
359 	.port_setup       = skeleton_eventdev_port_setup,
360 	.port_release     = skeleton_eventdev_port_release,
361 	.port_link        = skeleton_eventdev_port_link,
362 	.port_unlink      = skeleton_eventdev_port_unlink,
363 	.timeout_ticks    = skeleton_eventdev_timeout_ticks,
364 	.dump             = skeleton_eventdev_dump
365 };
366 
367 static int
368 skeleton_eventdev_init(struct rte_eventdev *eventdev)
369 {
370 	struct rte_pci_device *pci_dev;
371 	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
372 	int ret = 0;
373 
374 	PMD_DRV_FUNC_TRACE();
375 
376 	eventdev->dev_ops       = &skeleton_eventdev_ops;
377 	eventdev->enqueue       = skeleton_eventdev_enqueue;
378 	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
379 	eventdev->dequeue       = skeleton_eventdev_dequeue;
380 	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
381 
382 	/* For secondary processes, the primary has done all the work */
383 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
384 		return 0;
385 
386 	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
387 
388 	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
389 	if (!skel->reg_base) {
390 		PMD_DRV_ERR("Failed to map BAR0");
391 		ret = -ENODEV;
392 		goto fail;
393 	}
394 
395 	skel->device_id = pci_dev->id.device_id;
396 	skel->vendor_id = pci_dev->id.vendor_id;
397 	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
398 	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
399 
400 	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
401 			pci_dev->id.vendor_id, pci_dev->id.device_id,
402 			pci_dev->addr.domain, pci_dev->addr.bus,
403 			pci_dev->addr.devid, pci_dev->addr.function);
404 
405 	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
406 		eventdev->data->dev_id, eventdev->data->socket_id,
407 		skel->vendor_id, skel->device_id);
408 
409 fail:
410 	return ret;
411 }
412 
413 /* PCI based event device */
414 
415 #define EVENTDEV_SKEL_VENDOR_ID         0x177d
416 #define EVENTDEV_SKEL_PRODUCT_ID        0x0001
417 
/* PCI IDs claimed by this driver. */
static const struct rte_pci_id pci_id_skeleton_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
			       EVENTDEV_SKEL_PRODUCT_ID)
	},
	{
		/* Sentinel: vendor_id 0 terminates the table. */
		.vendor_id = 0,
	},
};
427 
428 static int
429 event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
430 			 struct rte_pci_device *pci_dev)
431 {
432 	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
433 		sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
434 }
435 
436 static int
437 event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
438 {
439 	return rte_event_pmd_pci_remove(pci_dev, NULL);
440 }
441 
/* PCI driver descriptor; NEED_MAPPING asks EAL to map BARs before probe. */
static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
	.id_table = pci_id_skeleton_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_skeleton_pci_probe,
	.remove = event_skeleton_pci_remove,
};
448 
449 RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
450 RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);
451 
452 /* VDEV based event device */
453 
454 static int
455 skeleton_eventdev_create(const char *name, int socket_id)
456 {
457 	struct rte_eventdev *eventdev;
458 
459 	eventdev = rte_event_pmd_vdev_init(name,
460 			sizeof(struct skeleton_eventdev), socket_id);
461 	if (eventdev == NULL) {
462 		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
463 		goto fail;
464 	}
465 
466 	eventdev->dev_ops       = &skeleton_eventdev_ops;
467 	eventdev->enqueue       = skeleton_eventdev_enqueue;
468 	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
469 	eventdev->dequeue       = skeleton_eventdev_dequeue;
470 	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
471 
472 	return 0;
473 fail:
474 	return -EFAULT;
475 }
476 
477 static int
478 skeleton_eventdev_probe(struct rte_vdev_device *vdev)
479 {
480 	const char *name;
481 
482 	name = rte_vdev_device_name(vdev);
483 	RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
484 			rte_socket_id());
485 	return skeleton_eventdev_create(name, rte_socket_id());
486 }
487 
488 static int
489 skeleton_eventdev_remove(struct rte_vdev_device *vdev)
490 {
491 	const char *name;
492 
493 	name = rte_vdev_device_name(vdev);
494 	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());
495 
496 	return rte_event_pmd_vdev_uninit(name);
497 }
498 
/* Vdev driver descriptor registered via RTE_PMD_REGISTER_VDEV below. */
static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
	.probe = skeleton_eventdev_probe,
	.remove = skeleton_eventdev_remove
};
503 
504 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
505