/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_bus_vdev.h>

#include "skeleton_eventdev.h"

#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
/**< Skeleton event device PMD name */

static uint16_t
skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(port);

	return 0;
}

static uint16_t
skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			uint16_t nb_events)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(port);
	RTE_SET_USED(nb_events);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
			uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(timeout_ticks);

	return 0;
}

static uint16_t
skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	struct skeleton_port *sp = port;

	RTE_SET_USED(sp);
	RTE_SET_USED(ev);
	RTE_SET_USED(nb_events);
	RTE_SET_USED(timeout_ticks);

	return 0;
}
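/*
 * Fast-path stubs: a real driver would move events to and from its
 * hardware queues here and return the number of events actually
 * enqueued or dequeued, which may be less than nb_events on
 * backpressure or an empty queue. A minimal burst-enqueue sketch,
 * assuming a hypothetical hw_queue_push() helper (not part of this
 * skeleton):
 *
 *	uint16_t i;
 *
 *	for (i = 0; i < nb_events; i++)
 *		if (hw_queue_push(sp, &ev[i]) < 0)
 *			break;
 *	return i;
 */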
static void
skeleton_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	dev_info->min_dequeue_timeout_ns = 1;
	dev_info->max_dequeue_timeout_ns = 10000;
	dev_info->dequeue_timeout_ns = 25;
	dev_info->max_event_queues = 64;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 8;
	dev_info->max_event_ports = 32;
	dev_info->max_event_port_dequeue_depth = 16;
	dev_info->max_event_port_enqueue_depth = 16;
	dev_info->max_num_events = (1ULL << 20);
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_BURST_MODE |
					RTE_EVENT_DEV_CAP_EVENT_QOS;
}

static int
skeleton_eventdev_configure(const struct rte_eventdev *dev)
{
	struct rte_eventdev_data *data = dev->data;
	struct rte_event_dev_config *conf = &data->dev_conf;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(conf);
	RTE_SET_USED(skel);

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

static int
skeleton_eventdev_start(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_stop(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
}

static int
skeleton_eventdev_close(struct rte_eventdev *dev)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);

	return 0;
}

static void
skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			const struct rte_event_queue_conf *queue_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(queue_conf);
	RTE_SET_USED(queue_id);

	return 0;
}

static void
skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			struct rte_event_port_conf *port_conf)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 32 * 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
	port_conf->disable_implicit_release = 0;
}

static void
skeleton_eventdev_port_release(void *port)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	rte_free(sp);
}

static int
skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			const struct rte_event_port_conf *port_conf)
{
	struct skeleton_port *sp;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				port_id);
		skeleton_eventdev_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	sp = rte_zmalloc_socket("eventdev port",
			sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (sp == NULL) {
		PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
		return -ENOMEM;
	}

	sp->port_id = port_id;

	PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);

	dev->data->ports[port_id] = sp;
	return 0;
}

static int
skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
			const uint8_t queues[], const uint8_t priorities[],
			uint16_t nb_links)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);
	RTE_SET_USED(priorities);

	/* Linked all the queues */
	return (int)nb_links;
}

static int
skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			uint8_t queues[], uint16_t nb_unlinks)
{
	struct skeleton_port *sp = port;
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(sp);
	RTE_SET_USED(queues);

	/* Unlinked all the queues */
	return (int)nb_unlinks;
}
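/*
 * The eventdev layer treats the return value of port_link()/port_unlink()
 * as the number of queues actually (un)linked, so the stubs above report
 * that every requested mapping succeeded. A real driver would program its
 * scheduler per queue and return a partial count on failure; a minimal
 * sketch, assuming a hypothetical hw_map_queue() helper:
 *
 *	uint16_t i;
 *
 *	for (i = 0; i < nb_links; i++)
 *		if (hw_map_queue(sp, queues[i], priorities[i]) < 0)
 *			break;
 *	return i;
 */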
static int
skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			uint64_t *timeout_ticks)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	*timeout_ticks = ns * scale;

	return 0;
}

static void
skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(skel);
	RTE_SET_USED(f);
}

/* Initialize and register event driver with DPDK Application */
static struct rte_eventdev_ops skeleton_eventdev_ops = {
	.dev_infos_get = skeleton_eventdev_info_get,
	.dev_configure = skeleton_eventdev_configure,
	.dev_start = skeleton_eventdev_start,
	.dev_stop = skeleton_eventdev_stop,
	.dev_close = skeleton_eventdev_close,
	.queue_def_conf = skeleton_eventdev_queue_def_conf,
	.queue_setup = skeleton_eventdev_queue_setup,
	.queue_release = skeleton_eventdev_queue_release,
	.port_def_conf = skeleton_eventdev_port_def_conf,
	.port_setup = skeleton_eventdev_port_setup,
	.port_release = skeleton_eventdev_port_release,
	.port_link = skeleton_eventdev_port_link,
	.port_unlink = skeleton_eventdev_port_unlink,
	.timeout_ticks = skeleton_eventdev_timeout_ticks,
	.dump = skeleton_eventdev_dump
};

static int
skeleton_eventdev_init(struct rte_eventdev *eventdev)
{
	struct rte_pci_device *pci_dev;
	struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
	int ret = 0;

	PMD_DRV_FUNC_TRACE();

	eventdev->dev_ops = &skeleton_eventdev_ops;
	eventdev->enqueue = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!skel->reg_base) {
		PMD_DRV_ERR("Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	skel->device_id = pci_dev->id.device_id;
	skel->vendor_id = pci_dev->id.vendor_id;
	skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
	skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
			eventdev->data->dev_id, eventdev->data->socket_id,
			skel->vendor_id, skel->device_id);

fail:
	return ret;
}
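/*
 * Note on the secondary-process early return in skeleton_eventdev_init():
 * device data is shared between processes, but function pointers are
 * per-process, so the dev_ops and fast-path handlers above must be set in
 * every process, while BAR mapping and hardware initialization are done
 * only in the primary.
 */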
/* PCI based event device */

#define EVENTDEV_SKEL_VENDOR_ID		0x177d
#define EVENTDEV_SKEL_PRODUCT_ID	0x0001

static const struct rte_pci_id pci_id_skeleton_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
			       EVENTDEV_SKEL_PRODUCT_ID)
	},
	{
		.vendor_id = 0,
	},
};

static int
event_skeleton_pci_probe(struct rte_pci_driver *pci_drv,
			 struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
			sizeof(struct skeleton_eventdev), skeleton_eventdev_init);
}

static int
event_skeleton_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}

static struct rte_pci_driver pci_eventdev_skeleton_pmd = {
	.id_table = pci_id_skeleton_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_skeleton_pci_probe,
	.remove = event_skeleton_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);

/* VDEV based event device */

static int
skeleton_eventdev_create(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;

	eventdev = rte_event_pmd_vdev_init(name,
			sizeof(struct skeleton_eventdev), socket_id);
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops = &skeleton_eventdev_ops;
	eventdev->enqueue = skeleton_eventdev_enqueue;
	eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
	eventdev->dequeue = skeleton_eventdev_dequeue;
	eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;

	return 0;
fail:
	return -EFAULT;
}

static int
skeleton_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
			rte_socket_id());
	return skeleton_eventdev_create(name, rte_socket_id());
}

static int
skeleton_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
	.probe = skeleton_eventdev_probe,
	.remove = skeleton_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
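/*
 * Usage note (illustrative, not part of the skeleton itself): with the
 * registrations above in place, an application can instantiate the
 * software flavour of this PMD at EAL init time, e.g. with
 * --vdev=event_skeleton0, and then locate it through the public API:
 *
 *	int dev_id = rte_event_dev_get_dev_id("event_skeleton0");
 *
 *	if (dev_id >= 0)
 *		rte_event_dev_configure(dev_id, &dev_conf);
 */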