/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_pci.h>
#include <rte_lcore.h>
#include <rte_vdev.h>

#include "skeleton_eventdev.h"

#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
/**< Skeleton event device PMD name */
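
/*
 * Fast-path entry points. A real PMD would move events to and from the
 * underlying hardware or software scheduler here; the skeleton only
 * marks its arguments as used and reports zero events processed.
 */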
static uint16_t
skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);

	return 0;
}

static uint16_t
skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			uint16_t nb_events)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
				uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);
	RTE_SET_USED(timeout_ticks);

	return 0;
}
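
/*
 * Application-side view of the stubs above, as a minimal sketch. The
 * dev_id/port_id values and the event fields are hypothetical; the
 * public eventdev API dispatches these calls to the enqueue/dequeue
 * handlers installed in skeleton_eventdev_init():
 *
 *	struct rte_event ev = {
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *	};
 *	uint16_t sent = rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *	uint16_t recd = rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0);
 */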

static void
skeleton_eventdev_info_get(struct rte_eventdev *dev,
		struct rte_event_dev_info *dev_info)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	dev_info->min_dequeue_timeout_ns = 1;
	dev_info->max_dequeue_timeout_ns = 10000;
	dev_info->dequeue_timeout_ns = 25;
	dev_info->max_event_queues = 64;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 8;
	dev_info->max_event_ports = 32;
	dev_info->max_event_port_dequeue_depth = 16;
	dev_info->max_event_port_enqueue_depth = 16;
	dev_info->max_num_events = (1ULL << 20);
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_EVENT_QOS;
}

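/*
 * Device control ops. The skeleton validates nothing and programs no
 * hardware; it only traces the call and reports success.
 */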
static int
skeleton_eventdev_configure(const struct rte_eventdev *dev)
{
	struct rte_eventdev_data *data = dev->data;
	struct rte_event_dev_config *conf = &data->dev_conf;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(conf);
	RTE_SET_USED(skel);

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

static int
skeleton_eventdev_start(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_stop(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
}

static int
skeleton_eventdev_close(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

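/*
 * Queue ops: advertise default limits and accept any configuration.
 * A real PMD's queue_setup would program a hardware queue from
 * queue_conf.
 */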
static void
skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_DEFAULT;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			      const struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_conf);
	RTE_SET_USED(queue_id);

	return 0;
}

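/*
 * Port ops: unlike queues, a port carries per-port state
 * (struct skeleton_port), so setup and release really do allocate and
 * free memory.
 */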
static void
skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 32 * 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
}

static void
skeleton_eventdev_port_release(void *port)
{
	struct skeleton_port *sp = port;

	PMD_DRV_FUNC_TRACE();

	rte_free(sp);
}

static int
skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct skeleton_port *sp;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				port_id);
		skeleton_eventdev_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	sp = rte_zmalloc_socket("eventdev port",
			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (sp == NULL) {
		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
		return -ENOMEM;
	}

	sp->port_id = port_id;

	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);

	dev->data->ports[port_id] = sp;
	return 0;
}

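/*
 * Link/unlink report full success: the return value is the number of
 * queues actually (un)linked, and the skeleton claims all of them.
 */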
static int
skeleton_eventdev_port_link(void *port,
			const uint8_t queues[], const uint8_t priorities[],
			uint16_t nb_links)
{
	struct skeleton_port *sp = port;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(sp);
	RTE_SET_USED(queues);
	RTE_SET_USED(priorities);

	/* Claim all requested queues as linked */
	return (int)nb_links;
}

static int
skeleton_eventdev_port_unlink(void *port, uint8_t queues[],
				 uint16_t nb_unlinks)
{
	struct skeleton_port *sp = port;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(sp);
	RTE_SET_USED(queues);

	/* Claim all requested queues as unlinked */
	return (int)nb_unlinks;
}

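/*
 * Convert a dequeue timeout from nanoseconds to device ticks. With
 * scale fixed at 1, one tick equals one nanosecond.
 */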
static void
skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	*timeout_ticks = ns * scale;
}

static void
skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(f);
}

/*
 * Ops table used to initialize and register the event driver with the
 * DPDK application.
 */
static const struct rte_eventdev_ops skeleton_eventdev_ops = {
	.dev_infos_get    = skeleton_eventdev_info_get,
	.dev_configure    = skeleton_eventdev_configure,
	.dev_start        = skeleton_eventdev_start,
	.dev_stop         = skeleton_eventdev_stop,
	.dev_close        = skeleton_eventdev_close,
	.queue_def_conf   = skeleton_eventdev_queue_def_conf,
	.queue_setup      = skeleton_eventdev_queue_setup,
	.queue_release    = skeleton_eventdev_queue_release,
	.port_def_conf    = skeleton_eventdev_port_def_conf,
	.port_setup       = skeleton_eventdev_port_setup,
	.port_release     = skeleton_eventdev_port_release,
	.port_link        = skeleton_eventdev_port_link,
	.port_unlink      = skeleton_eventdev_port_unlink,
	.timeout_ticks    = skeleton_eventdev_timeout_ticks,
	.dump             = skeleton_eventdev_dump
};

static int
skeleton_eventdev_init(struct rte_eventdev *eventdev)
{
	struct rte_pci_device *pci_dev;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
	int ret = 0;

	PMD_DRV_FUNC_TRACE();

	eventdev->dev_ops       = &skeleton_eventdev_ops;
	eventdev->schedule      = NULL;
	eventdev->enqueue       = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue       = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = eventdev->pci_dev;

	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!skel->reg_base) {
		PMD_DRV_ERR("Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	skel->device_id = pci_dev->id.device_id;
	skel->vendor_id = pci_dev->id.vendor_id;
	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
		eventdev->data->dev_id, eventdev->data->socket_id,
		skel->vendor_id, skel->device_id);

	/* Success also falls through here, with ret still 0 */
fail:
	return ret;
}

/* PCI based event device */

#define EVENTDEV_SKEL_VENDOR_ID         0x177d
#define EVENTDEV_SKEL_PRODUCT_ID        0x0001
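/* 0x177d is the PCI vendor ID assigned to Cavium */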

static const struct rte_pci_id pci_id_skeleton_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
			       EVENTDEV_SKEL_PRODUCT_ID)
	},
	{
		.vendor_id = 0,	/* sentinel: terminates the ID table */
	},
};

static struct rte_eventdev_driver pci_eventdev_skeleton_pmd = {
	.pci_drv = {
		.id_table = pci_id_skeleton_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_event_pmd_pci_probe,
		.remove = rte_event_pmd_pci_remove,
	},
	.eventdev_init = skeleton_eventdev_init,
	.dev_private_size = sizeof(struct skeleton_eventdev),
};

RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);

/* VDEV based event device */

/**
 * Global static parameter used to create a unique name for each skeleton
 * event device.
 */
static unsigned int skeleton_unique_id;

static inline int
skeleton_create_unique_device_name(char *name, size_t size)
{
	int ret;

	if (name == NULL)
		return -EINVAL;

	ret = snprintf(name, size, "%s_%u", RTE_STR(EVENTDEV_NAME_SKELETON_PMD),
			skeleton_unique_id++);
	if (ret < 0)
		return ret;
	if (ret >= (int)size)
		return -ENAMETOOLONG; /* truncated name would not be unique */
	return 0;
}
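
/*
 * Successive calls yield "event_skeleton_0", "event_skeleton_1", and so
 * on, so several skeleton vdev instances can coexist.
 */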

static int
skeleton_eventdev_create(int socket_id)
{
	struct rte_eventdev *eventdev;
	char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];

	/* Create a unique device name */
	if (skeleton_create_unique_device_name(eventdev_name,
			RTE_EVENTDEV_NAME_MAX_LEN) != 0) {
		PMD_DRV_ERR("Failed to create unique eventdev name");
		return -EINVAL;
	}

	eventdev = rte_event_pmd_vdev_init(eventdev_name,
			sizeof(struct skeleton_eventdev), socket_id);
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev");
		goto fail;
	}

	eventdev->dev_ops       = &skeleton_eventdev_ops;
	eventdev->schedule      = NULL;
	eventdev->enqueue       = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue       = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	return 0;
fail:
	return -EFAULT;
}

static int
skeleton_eventdev_probe(const char *name, __rte_unused const char *input_args)
{
	RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
			rte_socket_id());
	return skeleton_eventdev_create(rte_socket_id());
}

static int
skeleton_eventdev_remove(const char *name)
{
	if (name == NULL)
		return -EINVAL;

	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());

	return 0;
}

static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
	.probe = skeleton_eventdev_probe,
	.remove = skeleton_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
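
/*
 * A software instance can be created at startup through the EAL vdev
 * arguments, e.g. --vdev="event_skeleton0"; vdev names are matched
 * against the registered driver name by prefix.
 */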