/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_bus_vdev.h>

#include "skeleton_eventdev.h"

#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
/**< Skeleton event device PMD name */

static uint16_t
skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(port);

	return 0;
}

static uint16_t
skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			uint16_t nb_events)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(port);
	RTE_SET_USED(nb_events);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
			uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);
	RTE_SET_USED(timeout_ticks);

	return 0;
}
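
/*
 * The four stubs above are the fast-path entry points that applications
 * reach through the public eventdev API. A minimal application-side
 * sketch (illustrative only; dev_id and port_id are assumed to come from
 * an already configured and started device, pkt from an mbuf pool):
 *
 *	struct rte_event ev = {
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.mbuf = pkt,
 *	};
 *	uint16_t sent = rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *
 *	struct rte_event deq[16];
 *	uint16_t n = rte_event_dequeue_burst(dev_id, port_id, deq,
 *					     RTE_DIM(deq), timeout_ticks);
 */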

static void
skeleton_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	dev_info->min_dequeue_timeout_ns = 1;
	dev_info->max_dequeue_timeout_ns = 10000;
	dev_info->dequeue_timeout_ns = 25;
	dev_info->max_event_queues = 64;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 8;
	dev_info->max_event_ports = 32;
	dev_info->max_event_port_dequeue_depth = 16;
	dev_info->max_event_port_enqueue_depth = 16;
	dev_info->max_num_events = (1ULL << 20);
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
			RTE_EVENT_DEV_CAP_BURST_MODE |
			RTE_EVENT_DEV_CAP_EVENT_QOS |
			RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
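
/*
 * Applications read these limits before configuring the device; a
 * requested configuration that exceeds them is rejected. Illustrative
 * sketch, assuming dev_id identifies this device:
 *
 *	struct rte_event_dev_info info;
 *	rte_event_dev_info_get(dev_id, &info);
 *	// here: info.max_event_queues == 64, info.max_event_ports == 32
 */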

static int
skeleton_eventdev_configure(const struct rte_eventdev *dev)
{
	struct rte_eventdev_data *data = dev->data;
	struct rte_event_dev_config *conf = &data->dev_conf;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(conf);
	RTE_SET_USED(skel);

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

static int
skeleton_eventdev_start(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_stop(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
}

static int
skeleton_eventdev_close(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			const struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_conf);
	RTE_SET_USED(queue_id);

	return 0;
}
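
/*
 * A typical application fetches the default queue configuration and
 * overrides only what it needs before setup. Illustrative sketch:
 *
 *	struct rte_event_queue_conf qconf;
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
 *	qconf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
 *	rte_event_queue_setup(dev_id, queue_id, &qconf);
 */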

static void
skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			struct rte_event_port_conf *port_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 32 * 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
	port_conf->event_port_cfg = 0;
}

static void
skeleton_eventdev_port_release(void *port)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	rte_free(sp);
}

static int
skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			const struct rte_event_port_conf *port_conf)
{
	struct skeleton_port *sp;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			    port_id);
		skeleton_eventdev_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	sp = rte_zmalloc_socket("eventdev port",
			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (sp == NULL) {
		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
		return -ENOMEM;
	}

	sp->port_id = port_id;

	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);

	dev->data->ports[port_id] = sp;
	return 0;
}

static int
skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
			const uint8_t queues[], const uint8_t priorities[],
			uint16_t nb_links)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);
	RTE_SET_USED(priorities);

	/* Linked all the queues */
	return (int)nb_links;
}

static int
skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			uint8_t queues[], uint16_t nb_unlinks)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);

	/* Unlinked all the queues */
	return (int)nb_unlinks;
}

static int
skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			uint64_t *timeout_ticks)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	*timeout_ticks = ns * scale;

	return 0;
}
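
/*
 * timeout_ticks maps a nanosecond timeout onto device ticks for use as
 * the last argument of rte_event_dequeue_burst(). With scale == 1 the
 * mapping is the identity, e.g. 25 ns becomes 25 ticks. Illustrative
 * application-side sketch:
 *
 *	uint64_t ticks;
 *	rte_event_dequeue_timeout_ticks(dev_id, 25, &ticks);
 */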

static void
skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(f);
}

/* Initialize and register the event driver with the DPDK application */
static struct rte_eventdev_ops skeleton_eventdev_ops = {
	.dev_infos_get = skeleton_eventdev_info_get,
	.dev_configure = skeleton_eventdev_configure,
	.dev_start = skeleton_eventdev_start,
	.dev_stop = skeleton_eventdev_stop,
	.dev_close = skeleton_eventdev_close,
	.queue_def_conf = skeleton_eventdev_queue_def_conf,
	.queue_setup = skeleton_eventdev_queue_setup,
	.queue_release = skeleton_eventdev_queue_release,
	.port_def_conf = skeleton_eventdev_port_def_conf,
	.port_setup = skeleton_eventdev_port_setup,
	.port_release = skeleton_eventdev_port_release,
	.port_link = skeleton_eventdev_port_link,
	.port_unlink = skeleton_eventdev_port_unlink,
	.timeout_ticks = skeleton_eventdev_timeout_ticks,
	.dump = skeleton_eventdev_dump
};
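
/*
 * End-to-end bring-up against this ops table, as an illustrative sketch
 * (error handling omitted; dev_id would typically come from
 * rte_event_dev_get_dev_id("event_skeleton0")):
 *
 *	struct rte_event_dev_config config = {
 *		.nb_event_queues = 1,
 *		.nb_event_ports = 1,
 *		.nb_events_limit = 4096,
 *		.nb_event_queue_flows = 1024,
 *		.nb_event_port_dequeue_depth = 16,
 *		.nb_event_port_enqueue_depth = 16,
 *		.dequeue_timeout_ns = 25,
 *	};
 *	rte_event_dev_configure(dev_id, &config);
 *	rte_event_queue_setup(dev_id, 0, NULL);        // NULL: use defaults
 *	rte_event_port_setup(dev_id, 0, NULL);
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0); // link all queues
 *	rte_event_dev_start(dev_id);
 */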

static int
skeleton_eventdev_init(struct rte_eventdev *eventdev)
{
	struct rte_pci_device *pci_dev;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
	int ret = 0;

	PMD_DRV_FUNC_TRACE();

	eventdev->dev_ops = &skeleton_eventdev_ops;
	eventdev->enqueue = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!skel->reg_base) {
		PMD_DRV_ERR("Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	skel->device_id = pci_dev->id.device_id;
	skel->vendor_id = pci_dev->id.vendor_id;
	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
		    pci_dev->id.vendor_id, pci_dev->id.device_id,
		    pci_dev->addr.domain, pci_dev->addr.bus,
		    pci_dev->addr.devid, pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
		    eventdev->data->dev_id, eventdev->data->socket_id,
		    skel->vendor_id, skel->device_id);

fail:
	return ret;
}

/* PCI based event device */

#define EVENTDEV_SKEL_VENDOR_ID		0x177d
#define EVENTDEV_SKEL_PRODUCT_ID	0x0001

static const struct rte_pci_id pci_id_skeleton_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
			       EVENTDEV_SKEL_PRODUCT_ID)
	},
	{
		.vendor_id = 0,
	},
};

static int
event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
			struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
		sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
}

static int
event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}

static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
	.id_table = pci_id_skeleton_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_skeleton_pci_probe,
	.remove = event_skeleton_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);

/* VDEV based event device */

static int
skeleton_eventdev_create(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;

	eventdev = rte_event_pmd_vdev_init(name,
			sizeof(struct skeleton_eventdev), socket_id);
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops = &skeleton_eventdev_ops;
	eventdev->enqueue = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	return 0;
fail:
	return -EFAULT;
}

static int
skeleton_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
		rte_socket_id());
	return skeleton_eventdev_create(name, rte_socket_id());
}

static int
skeleton_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
	.probe = skeleton_eventdev_probe,
	.remove = skeleton_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
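
/*
 * With the vdev registration above, the driver can be instantiated from
 * the EAL command line without any PCI device present, e.g.
 * (illustrative):
 *
 *	./app --vdev=event_skeleton0
 *
 * after which the device is reachable through the standard eventdev API.
 */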