1*99a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause 2*99a2dd95SBruce Richardson * Copyright(c) 2017 Intel Corporation 3*99a2dd95SBruce Richardson */ 4*99a2dd95SBruce Richardson 5*99a2dd95SBruce Richardson #include <stdint.h> 6*99a2dd95SBruce Richardson #include <string.h> 7*99a2dd95SBruce Richardson #include <stdbool.h> 8*99a2dd95SBruce Richardson 9*99a2dd95SBruce Richardson #include <rte_compat.h> 10*99a2dd95SBruce Richardson #include <rte_common.h> 11*99a2dd95SBruce Richardson #include <rte_errno.h> 12*99a2dd95SBruce Richardson #include <rte_log.h> 13*99a2dd95SBruce Richardson #include <rte_debug.h> 14*99a2dd95SBruce Richardson #include <rte_eal.h> 15*99a2dd95SBruce Richardson #include <rte_malloc.h> 16*99a2dd95SBruce Richardson #include <rte_mempool.h> 17*99a2dd95SBruce Richardson #include <rte_memzone.h> 18*99a2dd95SBruce Richardson #include <rte_lcore.h> 19*99a2dd95SBruce Richardson #include <rte_dev.h> 20*99a2dd95SBruce Richardson #include <rte_spinlock.h> 21*99a2dd95SBruce Richardson #include <rte_tailq.h> 22*99a2dd95SBruce Richardson #include <rte_interrupts.h> 23*99a2dd95SBruce Richardson 24*99a2dd95SBruce Richardson #include "rte_bbdev_op.h" 25*99a2dd95SBruce Richardson #include "rte_bbdev.h" 26*99a2dd95SBruce Richardson #include "rte_bbdev_pmd.h" 27*99a2dd95SBruce Richardson 28*99a2dd95SBruce Richardson #define DEV_NAME "BBDEV" 29*99a2dd95SBruce Richardson 30*99a2dd95SBruce Richardson 31*99a2dd95SBruce Richardson /* BBDev library logging ID */ 32*99a2dd95SBruce Richardson RTE_LOG_REGISTER(bbdev_logtype, lib.bbdev, NOTICE); 33*99a2dd95SBruce Richardson 34*99a2dd95SBruce Richardson /* Helper macro for logging */ 35*99a2dd95SBruce Richardson #define rte_bbdev_log(level, fmt, ...) \ 36*99a2dd95SBruce Richardson rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__) 37*99a2dd95SBruce Richardson 38*99a2dd95SBruce Richardson #define rte_bbdev_log_debug(fmt, ...) \ 39*99a2dd95SBruce Richardson rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \ 40*99a2dd95SBruce Richardson ##__VA_ARGS__) 41*99a2dd95SBruce Richardson 42*99a2dd95SBruce Richardson /* Helper macro to check dev_id is valid */ 43*99a2dd95SBruce Richardson #define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \ 44*99a2dd95SBruce Richardson if (dev == NULL) { \ 45*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "device %u is invalid", dev_id); \ 46*99a2dd95SBruce Richardson return -ENODEV; \ 47*99a2dd95SBruce Richardson } \ 48*99a2dd95SBruce Richardson } while (0) 49*99a2dd95SBruce Richardson 50*99a2dd95SBruce Richardson /* Helper macro to check dev_ops is valid */ 51*99a2dd95SBruce Richardson #define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \ 52*99a2dd95SBruce Richardson if (dev->dev_ops == NULL) { \ 53*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \ 54*99a2dd95SBruce Richardson dev_id); \ 55*99a2dd95SBruce Richardson return -ENODEV; \ 56*99a2dd95SBruce Richardson } \ 57*99a2dd95SBruce Richardson } while (0) 58*99a2dd95SBruce Richardson 59*99a2dd95SBruce Richardson /* Helper macro to check that driver implements required function pointer */ 60*99a2dd95SBruce Richardson #define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \ 61*99a2dd95SBruce Richardson if (func == NULL) { \ 62*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "device %u does not support %s", \ 63*99a2dd95SBruce Richardson dev_id, #func); \ 64*99a2dd95SBruce Richardson return -ENOTSUP; \ 65*99a2dd95SBruce Richardson } \ 66*99a2dd95SBruce Richardson } while (0) 67*99a2dd95SBruce Richardson 68*99a2dd95SBruce Richardson /* Helper macro to check that queue is valid */ 69*99a2dd95SBruce Richardson #define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \ 70*99a2dd95SBruce Richardson if (queue_id >= dev->data->num_queues) { \ 71*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \ 72*99a2dd95SBruce Richardson queue_id, dev->data->dev_id); \ 73*99a2dd95SBruce Richardson return -ERANGE; \ 74*99a2dd95SBruce Richardson } \ 75*99a2dd95SBruce Richardson } while (0) 76*99a2dd95SBruce Richardson 77*99a2dd95SBruce Richardson /* List of callback functions registered by an application */ 78*99a2dd95SBruce Richardson struct rte_bbdev_callback { 79*99a2dd95SBruce Richardson TAILQ_ENTRY(rte_bbdev_callback) next; /* Callbacks list */ 80*99a2dd95SBruce Richardson rte_bbdev_cb_fn cb_fn; /* Callback address */ 81*99a2dd95SBruce Richardson void *cb_arg; /* Parameter for callback */ 82*99a2dd95SBruce Richardson void *ret_param; /* Return parameter */ 83*99a2dd95SBruce Richardson enum rte_bbdev_event_type event; /* Interrupt event type */ 84*99a2dd95SBruce Richardson uint32_t active; /* Callback is executing */ 85*99a2dd95SBruce Richardson }; 86*99a2dd95SBruce Richardson 87*99a2dd95SBruce Richardson /* spinlock for bbdev device callbacks */ 88*99a2dd95SBruce Richardson static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER; 89*99a2dd95SBruce Richardson 90*99a2dd95SBruce Richardson /* 91*99a2dd95SBruce Richardson * Global array of all devices. This is not static because it's used by the 92*99a2dd95SBruce Richardson * inline enqueue and dequeue functions 93*99a2dd95SBruce Richardson */ 94*99a2dd95SBruce Richardson struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS]; 95*99a2dd95SBruce Richardson 96*99a2dd95SBruce Richardson /* Global array with rte_bbdev_data structures */ 97*99a2dd95SBruce Richardson static struct rte_bbdev_data *rte_bbdev_data; 98*99a2dd95SBruce Richardson 99*99a2dd95SBruce Richardson /* Memzone name for global bbdev data pool */ 100*99a2dd95SBruce Richardson static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data"; 101*99a2dd95SBruce Richardson 102*99a2dd95SBruce Richardson /* Number of currently valid devices */ 103*99a2dd95SBruce Richardson static uint16_t num_devs; 104*99a2dd95SBruce Richardson 105*99a2dd95SBruce Richardson /* Return pointer to device structure, with validity check */ 106*99a2dd95SBruce Richardson static struct rte_bbdev * 107*99a2dd95SBruce Richardson get_dev(uint16_t dev_id) 108*99a2dd95SBruce Richardson { 109*99a2dd95SBruce Richardson if (rte_bbdev_is_valid(dev_id)) 110*99a2dd95SBruce Richardson return &rte_bbdev_devices[dev_id]; 111*99a2dd95SBruce Richardson return NULL; 112*99a2dd95SBruce Richardson } 113*99a2dd95SBruce Richardson 114*99a2dd95SBruce Richardson /* Allocate global data array */ 115*99a2dd95SBruce Richardson static int 116*99a2dd95SBruce Richardson rte_bbdev_data_alloc(void) 117*99a2dd95SBruce Richardson { 118*99a2dd95SBruce Richardson const unsigned int flags = 0; 119*99a2dd95SBruce Richardson const struct rte_memzone *mz; 120*99a2dd95SBruce Richardson 121*99a2dd95SBruce Richardson if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 122*99a2dd95SBruce Richardson mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA, 123*99a2dd95SBruce Richardson RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data), 124*99a2dd95SBruce Richardson rte_socket_id(), flags); 125*99a2dd95SBruce Richardson } else 126*99a2dd95SBruce Richardson mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA); 127*99a2dd95SBruce Richardson if (mz == NULL) { 128*99a2dd95SBruce Richardson rte_bbdev_log(CRIT, 129*99a2dd95SBruce Richardson "Cannot allocate memzone for bbdev port data"); 130*99a2dd95SBruce Richardson return -ENOMEM; 131*99a2dd95SBruce Richardson } 132*99a2dd95SBruce Richardson 133*99a2dd95SBruce Richardson rte_bbdev_data = mz->addr; 134*99a2dd95SBruce Richardson if (rte_eal_process_type() == RTE_PROC_PRIMARY) 135*99a2dd95SBruce Richardson memset(rte_bbdev_data, 0, 136*99a2dd95SBruce Richardson RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data)); 137*99a2dd95SBruce Richardson return 0; 138*99a2dd95SBruce Richardson } 139*99a2dd95SBruce Richardson 140*99a2dd95SBruce Richardson /* 141*99a2dd95SBruce Richardson * Find data alocated for the device or if not found return first unused bbdev 142*99a2dd95SBruce Richardson * data. If all structures are in use and none is used by the device return 143*99a2dd95SBruce Richardson * NULL. 144*99a2dd95SBruce Richardson */ 145*99a2dd95SBruce Richardson static struct rte_bbdev_data * 146*99a2dd95SBruce Richardson find_bbdev_data(const char *name) 147*99a2dd95SBruce Richardson { 148*99a2dd95SBruce Richardson uint16_t data_id; 149*99a2dd95SBruce Richardson 150*99a2dd95SBruce Richardson for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) { 151*99a2dd95SBruce Richardson if (strlen(rte_bbdev_data[data_id].name) == 0) { 152*99a2dd95SBruce Richardson memset(&rte_bbdev_data[data_id], 0, 153*99a2dd95SBruce Richardson sizeof(struct rte_bbdev_data)); 154*99a2dd95SBruce Richardson return &rte_bbdev_data[data_id]; 155*99a2dd95SBruce Richardson } else if (strncmp(rte_bbdev_data[data_id].name, name, 156*99a2dd95SBruce Richardson RTE_BBDEV_NAME_MAX_LEN) == 0) 157*99a2dd95SBruce Richardson return &rte_bbdev_data[data_id]; 158*99a2dd95SBruce Richardson } 159*99a2dd95SBruce Richardson 160*99a2dd95SBruce Richardson return NULL; 161*99a2dd95SBruce Richardson } 162*99a2dd95SBruce Richardson 163*99a2dd95SBruce Richardson /* Find lowest device id with no attached device */ 164*99a2dd95SBruce Richardson static uint16_t 165*99a2dd95SBruce Richardson find_free_dev_id(void) 166*99a2dd95SBruce Richardson { 167*99a2dd95SBruce Richardson uint16_t i; 168*99a2dd95SBruce Richardson for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) { 169*99a2dd95SBruce Richardson if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED) 170*99a2dd95SBruce Richardson return i; 171*99a2dd95SBruce Richardson } 172*99a2dd95SBruce Richardson return RTE_BBDEV_MAX_DEVS; 173*99a2dd95SBruce Richardson } 174*99a2dd95SBruce Richardson 175*99a2dd95SBruce Richardson struct rte_bbdev * 176*99a2dd95SBruce Richardson rte_bbdev_allocate(const char *name) 177*99a2dd95SBruce Richardson { 178*99a2dd95SBruce Richardson int ret; 179*99a2dd95SBruce Richardson struct rte_bbdev *bbdev; 180*99a2dd95SBruce Richardson uint16_t dev_id; 181*99a2dd95SBruce Richardson 182*99a2dd95SBruce Richardson if (name == NULL) { 183*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Invalid null device name"); 184*99a2dd95SBruce Richardson return NULL; 185*99a2dd95SBruce Richardson } 186*99a2dd95SBruce Richardson 187*99a2dd95SBruce Richardson if (rte_bbdev_get_named_dev(name) != NULL) { 188*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name); 189*99a2dd95SBruce Richardson return NULL; 190*99a2dd95SBruce Richardson } 191*99a2dd95SBruce Richardson 192*99a2dd95SBruce Richardson dev_id = find_free_dev_id(); 193*99a2dd95SBruce Richardson if (dev_id == RTE_BBDEV_MAX_DEVS) { 194*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Reached maximum number of devices"); 195*99a2dd95SBruce Richardson return NULL; 196*99a2dd95SBruce Richardson } 197*99a2dd95SBruce Richardson 198*99a2dd95SBruce Richardson bbdev = &rte_bbdev_devices[dev_id]; 199*99a2dd95SBruce Richardson 200*99a2dd95SBruce Richardson if (rte_bbdev_data == NULL) { 201*99a2dd95SBruce Richardson ret = rte_bbdev_data_alloc(); 202*99a2dd95SBruce Richardson if (ret != 0) 203*99a2dd95SBruce Richardson return NULL; 204*99a2dd95SBruce Richardson } 205*99a2dd95SBruce Richardson 206*99a2dd95SBruce Richardson bbdev->data = find_bbdev_data(name); 207*99a2dd95SBruce Richardson if (bbdev->data == NULL) { 208*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 209*99a2dd95SBruce Richardson "Max BBDevs already allocated in multi-process environment!"); 210*99a2dd95SBruce Richardson return NULL; 211*99a2dd95SBruce Richardson } 212*99a2dd95SBruce Richardson 213*99a2dd95SBruce Richardson __atomic_add_fetch(&bbdev->data->process_cnt, 1, __ATOMIC_RELAXED); 214*99a2dd95SBruce Richardson bbdev->data->dev_id = dev_id; 215*99a2dd95SBruce Richardson bbdev->state = RTE_BBDEV_INITIALIZED; 216*99a2dd95SBruce Richardson 217*99a2dd95SBruce Richardson ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name); 218*99a2dd95SBruce Richardson if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) { 219*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name); 220*99a2dd95SBruce Richardson return NULL; 221*99a2dd95SBruce Richardson } 222*99a2dd95SBruce Richardson 223*99a2dd95SBruce Richardson /* init user callbacks */ 224*99a2dd95SBruce Richardson TAILQ_INIT(&(bbdev->list_cbs)); 225*99a2dd95SBruce Richardson 226*99a2dd95SBruce Richardson num_devs++; 227*99a2dd95SBruce Richardson 228*99a2dd95SBruce Richardson rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u", 229*99a2dd95SBruce Richardson name, dev_id, num_devs); 230*99a2dd95SBruce Richardson 231*99a2dd95SBruce Richardson return bbdev; 232*99a2dd95SBruce Richardson } 233*99a2dd95SBruce Richardson 234*99a2dd95SBruce Richardson int 235*99a2dd95SBruce Richardson rte_bbdev_release(struct rte_bbdev *bbdev) 236*99a2dd95SBruce Richardson { 237*99a2dd95SBruce Richardson uint16_t dev_id; 238*99a2dd95SBruce Richardson struct rte_bbdev_callback *cb, *next; 239*99a2dd95SBruce Richardson 240*99a2dd95SBruce Richardson if (bbdev == NULL) { 241*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "NULL bbdev"); 242*99a2dd95SBruce Richardson return -ENODEV; 243*99a2dd95SBruce Richardson } 244*99a2dd95SBruce Richardson dev_id = bbdev->data->dev_id; 245*99a2dd95SBruce Richardson 246*99a2dd95SBruce Richardson /* free all callbacks from the device's list */ 247*99a2dd95SBruce Richardson for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) { 248*99a2dd95SBruce Richardson 249*99a2dd95SBruce Richardson next = TAILQ_NEXT(cb, next); 250*99a2dd95SBruce Richardson TAILQ_REMOVE(&(bbdev->list_cbs), cb, next); 251*99a2dd95SBruce Richardson rte_free(cb); 252*99a2dd95SBruce Richardson } 253*99a2dd95SBruce Richardson 254*99a2dd95SBruce Richardson /* clear shared BBDev Data if no process is using the device anymore */ 255*99a2dd95SBruce Richardson if (__atomic_sub_fetch(&bbdev->data->process_cnt, 1, 256*99a2dd95SBruce Richardson __ATOMIC_RELAXED) == 0) 257*99a2dd95SBruce Richardson memset(bbdev->data, 0, sizeof(*bbdev->data)); 258*99a2dd95SBruce Richardson 259*99a2dd95SBruce Richardson memset(bbdev, 0, sizeof(*bbdev)); 260*99a2dd95SBruce Richardson num_devs--; 261*99a2dd95SBruce Richardson bbdev->state = RTE_BBDEV_UNUSED; 262*99a2dd95SBruce Richardson 263*99a2dd95SBruce Richardson rte_bbdev_log_debug( 264*99a2dd95SBruce Richardson "Un-initialised device id = %u. Num devices = %u", 265*99a2dd95SBruce Richardson dev_id, num_devs); 266*99a2dd95SBruce Richardson return 0; 267*99a2dd95SBruce Richardson } 268*99a2dd95SBruce Richardson 269*99a2dd95SBruce Richardson struct rte_bbdev * 270*99a2dd95SBruce Richardson rte_bbdev_get_named_dev(const char *name) 271*99a2dd95SBruce Richardson { 272*99a2dd95SBruce Richardson unsigned int i; 273*99a2dd95SBruce Richardson 274*99a2dd95SBruce Richardson if (name == NULL) { 275*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "NULL driver name"); 276*99a2dd95SBruce Richardson return NULL; 277*99a2dd95SBruce Richardson } 278*99a2dd95SBruce Richardson 279*99a2dd95SBruce Richardson for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) { 280*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(i); 281*99a2dd95SBruce Richardson if (dev && (strncmp(dev->data->name, 282*99a2dd95SBruce Richardson name, RTE_BBDEV_NAME_MAX_LEN) == 0)) 283*99a2dd95SBruce Richardson return dev; 284*99a2dd95SBruce Richardson } 285*99a2dd95SBruce Richardson 286*99a2dd95SBruce Richardson return NULL; 287*99a2dd95SBruce Richardson } 288*99a2dd95SBruce Richardson 289*99a2dd95SBruce Richardson uint16_t 290*99a2dd95SBruce Richardson rte_bbdev_count(void) 291*99a2dd95SBruce Richardson { 292*99a2dd95SBruce Richardson return num_devs; 293*99a2dd95SBruce Richardson } 294*99a2dd95SBruce Richardson 295*99a2dd95SBruce Richardson bool 296*99a2dd95SBruce Richardson rte_bbdev_is_valid(uint16_t dev_id) 297*99a2dd95SBruce Richardson { 298*99a2dd95SBruce Richardson if ((dev_id < RTE_BBDEV_MAX_DEVS) && 299*99a2dd95SBruce Richardson rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED) 300*99a2dd95SBruce Richardson return true; 301*99a2dd95SBruce Richardson return false; 302*99a2dd95SBruce Richardson } 303*99a2dd95SBruce Richardson 304*99a2dd95SBruce Richardson uint16_t 305*99a2dd95SBruce Richardson rte_bbdev_find_next(uint16_t dev_id) 306*99a2dd95SBruce Richardson { 307*99a2dd95SBruce Richardson dev_id++; 308*99a2dd95SBruce Richardson for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++) 309*99a2dd95SBruce Richardson if (rte_bbdev_is_valid(dev_id)) 310*99a2dd95SBruce Richardson break; 311*99a2dd95SBruce Richardson return dev_id; 312*99a2dd95SBruce Richardson } 313*99a2dd95SBruce Richardson 314*99a2dd95SBruce Richardson int 315*99a2dd95SBruce Richardson rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id) 316*99a2dd95SBruce Richardson { 317*99a2dd95SBruce Richardson unsigned int i; 318*99a2dd95SBruce Richardson int ret; 319*99a2dd95SBruce Richardson struct rte_bbdev_driver_info dev_info; 320*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 321*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 322*99a2dd95SBruce Richardson 323*99a2dd95SBruce Richardson VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); 324*99a2dd95SBruce Richardson 325*99a2dd95SBruce Richardson if (dev->data->started) { 326*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 327*99a2dd95SBruce Richardson "Device %u cannot be configured when started", 328*99a2dd95SBruce Richardson dev_id); 329*99a2dd95SBruce Richardson return -EBUSY; 330*99a2dd95SBruce Richardson } 331*99a2dd95SBruce Richardson 332*99a2dd95SBruce Richardson /* Get device driver information to get max number of queues */ 333*99a2dd95SBruce Richardson VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id); 334*99a2dd95SBruce Richardson memset(&dev_info, 0, sizeof(dev_info)); 335*99a2dd95SBruce Richardson dev->dev_ops->info_get(dev, &dev_info); 336*99a2dd95SBruce Richardson 337*99a2dd95SBruce Richardson if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) { 338*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 339*99a2dd95SBruce Richardson "Device %u supports 0 < N <= %u queues, not %u", 340*99a2dd95SBruce Richardson dev_id, dev_info.max_num_queues, num_queues); 341*99a2dd95SBruce Richardson return -EINVAL; 342*99a2dd95SBruce Richardson } 343*99a2dd95SBruce Richardson 344*99a2dd95SBruce Richardson /* If re-configuration, get driver to free existing internal memory */ 345*99a2dd95SBruce Richardson if (dev->data->queues != NULL) { 346*99a2dd95SBruce Richardson VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id); 347*99a2dd95SBruce Richardson for (i = 0; i < dev->data->num_queues; i++) { 348*99a2dd95SBruce Richardson int ret = dev->dev_ops->queue_release(dev, i); 349*99a2dd95SBruce Richardson if (ret < 0) { 350*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 351*99a2dd95SBruce Richardson "Device %u queue %u release failed", 352*99a2dd95SBruce Richardson dev_id, i); 353*99a2dd95SBruce Richardson return ret; 354*99a2dd95SBruce Richardson } 355*99a2dd95SBruce Richardson } 356*99a2dd95SBruce Richardson /* Call optional device close */ 357*99a2dd95SBruce Richardson if (dev->dev_ops->close) { 358*99a2dd95SBruce Richardson ret = dev->dev_ops->close(dev); 359*99a2dd95SBruce Richardson if (ret < 0) { 360*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 361*99a2dd95SBruce Richardson "Device %u couldn't be closed", 362*99a2dd95SBruce Richardson dev_id); 363*99a2dd95SBruce Richardson return ret; 364*99a2dd95SBruce Richardson } 365*99a2dd95SBruce Richardson } 366*99a2dd95SBruce Richardson rte_free(dev->data->queues); 367*99a2dd95SBruce Richardson } 368*99a2dd95SBruce Richardson 369*99a2dd95SBruce Richardson /* Allocate queue pointers */ 370*99a2dd95SBruce Richardson dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues, 371*99a2dd95SBruce Richardson sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE, 372*99a2dd95SBruce Richardson dev->data->socket_id); 373*99a2dd95SBruce Richardson if (dev->data->queues == NULL) { 374*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 375*99a2dd95SBruce Richardson "calloc of %u queues for device %u on socket %i failed", 376*99a2dd95SBruce Richardson num_queues, dev_id, dev->data->socket_id); 377*99a2dd95SBruce Richardson return -ENOMEM; 378*99a2dd95SBruce Richardson } 379*99a2dd95SBruce Richardson 380*99a2dd95SBruce Richardson dev->data->num_queues = num_queues; 381*99a2dd95SBruce Richardson 382*99a2dd95SBruce Richardson /* Call optional device configuration */ 383*99a2dd95SBruce Richardson if (dev->dev_ops->setup_queues) { 384*99a2dd95SBruce Richardson ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id); 385*99a2dd95SBruce Richardson if (ret < 0) { 386*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 387*99a2dd95SBruce Richardson "Device %u memory configuration failed", 388*99a2dd95SBruce Richardson dev_id); 389*99a2dd95SBruce Richardson goto error; 390*99a2dd95SBruce Richardson } 391*99a2dd95SBruce Richardson } 392*99a2dd95SBruce Richardson 393*99a2dd95SBruce Richardson rte_bbdev_log_debug("Device %u set up with %u queues", dev_id, 394*99a2dd95SBruce Richardson num_queues); 395*99a2dd95SBruce Richardson return 0; 396*99a2dd95SBruce Richardson 397*99a2dd95SBruce Richardson error: 398*99a2dd95SBruce Richardson dev->data->num_queues = 0; 399*99a2dd95SBruce Richardson rte_free(dev->data->queues); 400*99a2dd95SBruce Richardson dev->data->queues = NULL; 401*99a2dd95SBruce Richardson return ret; 402*99a2dd95SBruce Richardson } 403*99a2dd95SBruce Richardson 404*99a2dd95SBruce Richardson int 405*99a2dd95SBruce Richardson rte_bbdev_intr_enable(uint16_t dev_id) 406*99a2dd95SBruce Richardson { 407*99a2dd95SBruce Richardson int ret; 408*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 409*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 410*99a2dd95SBruce Richardson 411*99a2dd95SBruce Richardson VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); 412*99a2dd95SBruce Richardson 413*99a2dd95SBruce Richardson if (dev->data->started) { 414*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 415*99a2dd95SBruce Richardson "Device %u cannot be configured when started", 416*99a2dd95SBruce Richardson dev_id); 417*99a2dd95SBruce Richardson return -EBUSY; 418*99a2dd95SBruce Richardson } 419*99a2dd95SBruce Richardson 420*99a2dd95SBruce Richardson if (dev->dev_ops->intr_enable) { 421*99a2dd95SBruce Richardson ret = dev->dev_ops->intr_enable(dev); 422*99a2dd95SBruce Richardson if (ret < 0) { 423*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 424*99a2dd95SBruce Richardson "Device %u interrupts configuration failed", 425*99a2dd95SBruce Richardson dev_id); 426*99a2dd95SBruce Richardson return ret; 427*99a2dd95SBruce Richardson } 428*99a2dd95SBruce Richardson rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id); 429*99a2dd95SBruce Richardson return 0; 430*99a2dd95SBruce Richardson } 431*99a2dd95SBruce Richardson 432*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id); 433*99a2dd95SBruce Richardson return -ENOTSUP; 434*99a2dd95SBruce Richardson } 435*99a2dd95SBruce Richardson 436*99a2dd95SBruce Richardson int 437*99a2dd95SBruce Richardson rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id, 438*99a2dd95SBruce Richardson const struct rte_bbdev_queue_conf *conf) 439*99a2dd95SBruce Richardson { 440*99a2dd95SBruce Richardson int ret = 0; 441*99a2dd95SBruce Richardson struct rte_bbdev_driver_info dev_info; 442*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 443*99a2dd95SBruce Richardson const struct rte_bbdev_op_cap *p; 444*99a2dd95SBruce Richardson struct rte_bbdev_queue_conf *stored_conf; 445*99a2dd95SBruce Richardson const char *op_type_str; 446*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 447*99a2dd95SBruce Richardson 448*99a2dd95SBruce Richardson VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); 449*99a2dd95SBruce Richardson 450*99a2dd95SBruce Richardson VALID_QUEUE_OR_RET_ERR(queue_id, dev); 451*99a2dd95SBruce Richardson 452*99a2dd95SBruce Richardson if (dev->data->queues[queue_id].started || dev->data->started) { 453*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 454*99a2dd95SBruce Richardson "Queue %u of device %u cannot be configured when started", 455*99a2dd95SBruce Richardson queue_id, dev_id); 456*99a2dd95SBruce Richardson return -EBUSY; 457*99a2dd95SBruce Richardson } 458*99a2dd95SBruce Richardson 459*99a2dd95SBruce Richardson VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id); 460*99a2dd95SBruce Richardson VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id); 461*99a2dd95SBruce Richardson 462*99a2dd95SBruce Richardson /* Get device driver information to verify config is valid */ 463*99a2dd95SBruce Richardson VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id); 464*99a2dd95SBruce Richardson memset(&dev_info, 0, sizeof(dev_info)); 465*99a2dd95SBruce Richardson dev->dev_ops->info_get(dev, &dev_info); 466*99a2dd95SBruce Richardson 467*99a2dd95SBruce Richardson /* Check configuration is valid */ 468*99a2dd95SBruce Richardson if (conf != NULL) { 469*99a2dd95SBruce Richardson if ((conf->op_type == RTE_BBDEV_OP_NONE) && 470*99a2dd95SBruce Richardson (dev_info.capabilities[0].type == 471*99a2dd95SBruce Richardson RTE_BBDEV_OP_NONE)) { 472*99a2dd95SBruce Richardson ret = 1; 473*99a2dd95SBruce Richardson } else { 474*99a2dd95SBruce Richardson for (p = dev_info.capabilities; 475*99a2dd95SBruce Richardson p->type != RTE_BBDEV_OP_NONE; p++) { 476*99a2dd95SBruce Richardson if (conf->op_type == p->type) { 477*99a2dd95SBruce Richardson ret = 1; 478*99a2dd95SBruce Richardson break; 479*99a2dd95SBruce Richardson } 480*99a2dd95SBruce Richardson } 481*99a2dd95SBruce Richardson } 482*99a2dd95SBruce Richardson if (ret == 0) { 483*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Invalid operation type"); 484*99a2dd95SBruce Richardson return -EINVAL; 485*99a2dd95SBruce Richardson } 486*99a2dd95SBruce Richardson if (conf->queue_size > dev_info.queue_size_lim) { 487*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 488*99a2dd95SBruce Richardson "Size (%u) of queue %u of device %u must be: <= %u", 489*99a2dd95SBruce Richardson conf->queue_size, queue_id, dev_id, 490*99a2dd95SBruce Richardson dev_info.queue_size_lim); 491*99a2dd95SBruce Richardson return -EINVAL; 492*99a2dd95SBruce Richardson } 493*99a2dd95SBruce Richardson if (!rte_is_power_of_2(conf->queue_size)) { 494*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 495*99a2dd95SBruce Richardson "Size (%u) of queue %u of device %u must be a power of 2", 496*99a2dd95SBruce Richardson conf->queue_size, queue_id, dev_id); 497*99a2dd95SBruce Richardson return -EINVAL; 498*99a2dd95SBruce Richardson } 499*99a2dd95SBruce Richardson if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC && 500*99a2dd95SBruce Richardson conf->priority > dev_info.max_ul_queue_priority) { 501*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 502*99a2dd95SBruce Richardson "Priority (%u) of queue %u of bbdev %u must be <= %u", 503*99a2dd95SBruce Richardson conf->priority, queue_id, dev_id, 504*99a2dd95SBruce Richardson dev_info.max_ul_queue_priority); 505*99a2dd95SBruce Richardson return -EINVAL; 506*99a2dd95SBruce Richardson } 507*99a2dd95SBruce Richardson if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC && 508*99a2dd95SBruce Richardson conf->priority > dev_info.max_dl_queue_priority) { 509*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 510*99a2dd95SBruce Richardson "Priority (%u) of queue %u of bbdev %u must be <= %u", 511*99a2dd95SBruce Richardson conf->priority, queue_id, dev_id, 512*99a2dd95SBruce Richardson dev_info.max_dl_queue_priority); 513*99a2dd95SBruce Richardson return -EINVAL; 514*99a2dd95SBruce Richardson } 515*99a2dd95SBruce Richardson } 516*99a2dd95SBruce Richardson 517*99a2dd95SBruce Richardson /* Release existing queue (in case of queue reconfiguration) */ 518*99a2dd95SBruce Richardson if (dev->data->queues[queue_id].queue_private != NULL) { 519*99a2dd95SBruce Richardson ret = dev->dev_ops->queue_release(dev, queue_id); 520*99a2dd95SBruce Richardson if (ret < 0) { 521*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Device %u queue %u release failed", 522*99a2dd95SBruce Richardson dev_id, queue_id); 523*99a2dd95SBruce Richardson return ret; 524*99a2dd95SBruce Richardson } 525*99a2dd95SBruce Richardson } 526*99a2dd95SBruce Richardson 527*99a2dd95SBruce Richardson /* Get driver to setup the queue */ 528*99a2dd95SBruce Richardson ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ? 529*99a2dd95SBruce Richardson conf : &dev_info.default_queue_conf); 530*99a2dd95SBruce Richardson if (ret < 0) { 531*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 532*99a2dd95SBruce Richardson "Device %u queue %u setup failed", dev_id, 533*99a2dd95SBruce Richardson queue_id); 534*99a2dd95SBruce Richardson return ret; 535*99a2dd95SBruce Richardson } 536*99a2dd95SBruce Richardson 537*99a2dd95SBruce Richardson /* Store configuration */ 538*99a2dd95SBruce Richardson stored_conf = &dev->data->queues[queue_id].conf; 539*99a2dd95SBruce Richardson memcpy(stored_conf, 540*99a2dd95SBruce Richardson (conf != NULL) ? conf : &dev_info.default_queue_conf, 541*99a2dd95SBruce Richardson sizeof(*stored_conf)); 542*99a2dd95SBruce Richardson 543*99a2dd95SBruce Richardson op_type_str = rte_bbdev_op_type_str(stored_conf->op_type); 544*99a2dd95SBruce Richardson if (op_type_str == NULL) 545*99a2dd95SBruce Richardson return -EINVAL; 546*99a2dd95SBruce Richardson 547*99a2dd95SBruce Richardson rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)", 548*99a2dd95SBruce Richardson dev_id, queue_id, stored_conf->queue_size, op_type_str, 549*99a2dd95SBruce Richardson stored_conf->priority); 550*99a2dd95SBruce Richardson 551*99a2dd95SBruce Richardson return 0; 552*99a2dd95SBruce Richardson } 553*99a2dd95SBruce Richardson 554*99a2dd95SBruce Richardson int 555*99a2dd95SBruce Richardson rte_bbdev_start(uint16_t dev_id) 556*99a2dd95SBruce Richardson { 557*99a2dd95SBruce Richardson int i; 558*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 559*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 560*99a2dd95SBruce Richardson 561*99a2dd95SBruce Richardson VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); 562*99a2dd95SBruce Richardson 563*99a2dd95SBruce Richardson if (dev->data->started) { 564*99a2dd95SBruce Richardson rte_bbdev_log_debug("Device %u is already started", dev_id); 565*99a2dd95SBruce Richardson return 0; 566*99a2dd95SBruce Richardson } 567*99a2dd95SBruce Richardson 568*99a2dd95SBruce Richardson if (dev->dev_ops->start) { 569*99a2dd95SBruce Richardson int ret = dev->dev_ops->start(dev); 570*99a2dd95SBruce Richardson if (ret < 0) { 571*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Device %u start failed", dev_id); 572*99a2dd95SBruce Richardson return ret; 573*99a2dd95SBruce Richardson } 574*99a2dd95SBruce Richardson } 575*99a2dd95SBruce Richardson 576*99a2dd95SBruce Richardson /* Store new state */ 577*99a2dd95SBruce Richardson for (i = 0; i < dev->data->num_queues; i++) 578*99a2dd95SBruce Richardson if (!dev->data->queues[i].conf.deferred_start) 579*99a2dd95SBruce Richardson dev->data->queues[i].started = true; 580*99a2dd95SBruce Richardson dev->data->started = true; 581*99a2dd95SBruce Richardson 582*99a2dd95SBruce Richardson rte_bbdev_log_debug("Started device %u", dev_id); 583*99a2dd95SBruce Richardson return 0; 584*99a2dd95SBruce Richardson } 585*99a2dd95SBruce Richardson 586*99a2dd95SBruce Richardson int 587*99a2dd95SBruce Richardson rte_bbdev_stop(uint16_t dev_id) 588*99a2dd95SBruce Richardson { 589*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 590*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 591*99a2dd95SBruce Richardson 592*99a2dd95SBruce Richardson VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); 593*99a2dd95SBruce Richardson 594*99a2dd95SBruce Richardson if (!dev->data->started) { 595*99a2dd95SBruce Richardson rte_bbdev_log_debug("Device %u is already stopped", dev_id); 596*99a2dd95SBruce Richardson return 0; 597*99a2dd95SBruce Richardson } 598*99a2dd95SBruce Richardson 599*99a2dd95SBruce Richardson if (dev->dev_ops->stop) 600*99a2dd95SBruce Richardson dev->dev_ops->stop(dev); 601*99a2dd95SBruce Richardson dev->data->started = false; 602*99a2dd95SBruce Richardson 603*99a2dd95SBruce Richardson rte_bbdev_log_debug("Stopped device %u", dev_id); 604*99a2dd95SBruce Richardson return 0; 605*99a2dd95SBruce Richardson } 606*99a2dd95SBruce Richardson 607*99a2dd95SBruce Richardson int 608*99a2dd95SBruce Richardson rte_bbdev_close(uint16_t dev_id) 609*99a2dd95SBruce Richardson { 610*99a2dd95SBruce Richardson int ret; 611*99a2dd95SBruce Richardson uint16_t i; 612*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 613*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 614*99a2dd95SBruce Richardson 615*99a2dd95SBruce Richardson VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); 616*99a2dd95SBruce Richardson 617*99a2dd95SBruce Richardson if (dev->data->started) { 618*99a2dd95SBruce Richardson ret = rte_bbdev_stop(dev_id); 619*99a2dd95SBruce Richardson if (ret < 0) { 620*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Device %u stop failed", dev_id); 621*99a2dd95SBruce Richardson return ret; 622*99a2dd95SBruce Richardson } 623*99a2dd95SBruce Richardson } 624*99a2dd95SBruce Richardson 625*99a2dd95SBruce Richardson /* Free memory used by queues */ 626*99a2dd95SBruce Richardson for (i = 0; i < dev->data->num_queues; i++) { 627*99a2dd95SBruce Richardson ret = dev->dev_ops->queue_release(dev, i); 628*99a2dd95SBruce Richardson if (ret < 0) { 629*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Device %u queue %u release failed", 630*99a2dd95SBruce Richardson dev_id, i); 631*99a2dd95SBruce Richardson return ret; 632*99a2dd95SBruce Richardson } 633*99a2dd95SBruce Richardson } 634*99a2dd95SBruce Richardson rte_free(dev->data->queues); 635*99a2dd95SBruce Richardson 636*99a2dd95SBruce Richardson if (dev->dev_ops->close) { 637*99a2dd95SBruce Richardson ret = dev->dev_ops->close(dev); 638*99a2dd95SBruce Richardson if (ret < 0) { 639*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Device %u close failed", dev_id); 640*99a2dd95SBruce Richardson return ret; 641*99a2dd95SBruce Richardson } 642*99a2dd95SBruce Richardson } 643*99a2dd95SBruce Richardson 644*99a2dd95SBruce Richardson /* Clear configuration */ 645*99a2dd95SBruce Richardson dev->data->queues = NULL; 646*99a2dd95SBruce Richardson dev->data->num_queues = 0; 647*99a2dd95SBruce Richardson 648*99a2dd95SBruce Richardson rte_bbdev_log_debug("Closed device %u", dev_id); 649*99a2dd95SBruce Richardson return 0; 650*99a2dd95SBruce Richardson } 651*99a2dd95SBruce Richardson 652*99a2dd95SBruce Richardson int 653*99a2dd95SBruce Richardson rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id) 654*99a2dd95SBruce Richardson { 655*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 656*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 657*99a2dd95SBruce Richardson 658*99a2dd95SBruce Richardson VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); 659*99a2dd95SBruce Richardson 660*99a2dd95SBruce Richardson VALID_QUEUE_OR_RET_ERR(queue_id, dev); 661*99a2dd95SBruce Richardson 662*99a2dd95SBruce Richardson if (dev->data->queues[queue_id].started) { 663*99a2dd95SBruce Richardson rte_bbdev_log_debug("Queue %u of device %u already started", 664*99a2dd95SBruce Richardson queue_id, dev_id); 665*99a2dd95SBruce Richardson return 0; 666*99a2dd95SBruce Richardson } 667*99a2dd95SBruce Richardson 668*99a2dd95SBruce Richardson if (dev->dev_ops->queue_start) { 669*99a2dd95SBruce Richardson int ret = dev->dev_ops->queue_start(dev, queue_id); 670*99a2dd95SBruce Richardson if (ret < 0) { 671*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Device %u queue %u start failed", 672*99a2dd95SBruce Richardson dev_id, queue_id); 673*99a2dd95SBruce Richardson return ret; 674*99a2dd95SBruce Richardson } 675*99a2dd95SBruce Richardson } 676*99a2dd95SBruce Richardson dev->data->queues[queue_id].started = true; 677*99a2dd95SBruce Richardson 678*99a2dd95SBruce Richardson rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id); 679*99a2dd95SBruce Richardson return 0; 680*99a2dd95SBruce Richardson } 681*99a2dd95SBruce Richardson 682*99a2dd95SBruce Richardson int 683*99a2dd95SBruce Richardson rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id) 684*99a2dd95SBruce Richardson { 685*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 686*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 687*99a2dd95SBruce Richardson 688*99a2dd95SBruce Richardson VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); 689*99a2dd95SBruce Richardson 690*99a2dd95SBruce Richardson VALID_QUEUE_OR_RET_ERR(queue_id, dev); 691*99a2dd95SBruce Richardson 692*99a2dd95SBruce Richardson if (!dev->data->queues[queue_id].started) { 693*99a2dd95SBruce Richardson rte_bbdev_log_debug("Queue %u of device %u already stopped", 694*99a2dd95SBruce Richardson queue_id, dev_id); 695*99a2dd95SBruce Richardson return 0; 696*99a2dd95SBruce Richardson } 697*99a2dd95SBruce Richardson 698*99a2dd95SBruce Richardson if (dev->dev_ops->queue_stop) { 699*99a2dd95SBruce Richardson int ret = dev->dev_ops->queue_stop(dev, queue_id); 700*99a2dd95SBruce Richardson if (ret < 0) { 701*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Device %u queue %u stop failed", 702*99a2dd95SBruce Richardson dev_id, queue_id); 703*99a2dd95SBruce Richardson return ret; 704*99a2dd95SBruce Richardson } 705*99a2dd95SBruce Richardson } 706*99a2dd95SBruce Richardson dev->data->queues[queue_id].started = false; 707*99a2dd95SBruce Richardson 708*99a2dd95SBruce Richardson rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id); 709*99a2dd95SBruce Richardson return 0; 710*99a2dd95SBruce Richardson } 711*99a2dd95SBruce Richardson 712*99a2dd95SBruce Richardson /* Get device statistics */ 713*99a2dd95SBruce Richardson static void 714*99a2dd95SBruce Richardson get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats) 715*99a2dd95SBruce Richardson { 716*99a2dd95SBruce Richardson unsigned int q_id; 717*99a2dd95SBruce Richardson for (q_id = 0; q_id < dev->data->num_queues; q_id++) { 718*99a2dd95SBruce Richardson struct rte_bbdev_stats *q_stats = 719*99a2dd95SBruce Richardson &dev->data->queues[q_id].queue_stats; 720*99a2dd95SBruce Richardson 721*99a2dd95SBruce Richardson stats->enqueued_count += q_stats->enqueued_count; 722*99a2dd95SBruce Richardson stats->dequeued_count += q_stats->dequeued_count; 723*99a2dd95SBruce Richardson stats->enqueue_err_count += q_stats->enqueue_err_count; 724*99a2dd95SBruce Richardson stats->dequeue_err_count += q_stats->dequeue_err_count; 725*99a2dd95SBruce Richardson } 726*99a2dd95SBruce Richardson rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id); 727*99a2dd95SBruce Richardson } 728*99a2dd95SBruce Richardson 729*99a2dd95SBruce Richardson static void 730*99a2dd95SBruce Richardson reset_stats_in_queues(struct rte_bbdev *dev) 731*99a2dd95SBruce Richardson { 732*99a2dd95SBruce Richardson unsigned int q_id; 733*99a2dd95SBruce Richardson for (q_id = 0; q_id < dev->data->num_queues; q_id++) { 734*99a2dd95SBruce Richardson struct rte_bbdev_stats *q_stats = 735*99a2dd95SBruce Richardson &dev->data->queues[q_id].queue_stats; 736*99a2dd95SBruce Richardson 737*99a2dd95SBruce Richardson memset(q_stats, 0, sizeof(*q_stats)); 738*99a2dd95SBruce Richardson } 739*99a2dd95SBruce Richardson rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id); 740*99a2dd95SBruce Richardson } 741*99a2dd95SBruce Richardson 742*99a2dd95SBruce Richardson int 743*99a2dd95SBruce Richardson rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats) 744*99a2dd95SBruce Richardson { 745*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 746*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 747*99a2dd95SBruce Richardson 748*99a2dd95SBruce Richardson VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); 749*99a2dd95SBruce Richardson 750*99a2dd95SBruce Richardson if (stats == NULL) { 751*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "NULL stats structure"); 752*99a2dd95SBruce Richardson return -EINVAL; 753*99a2dd95SBruce Richardson } 754*99a2dd95SBruce Richardson 755*99a2dd95SBruce Richardson memset(stats, 0, sizeof(*stats)); 756*99a2dd95SBruce Richardson if (dev->dev_ops->stats_get != NULL) 757*99a2dd95SBruce Richardson dev->dev_ops->stats_get(dev, stats); 758*99a2dd95SBruce Richardson else 759*99a2dd95SBruce Richardson get_stats_from_queues(dev, stats); 760*99a2dd95SBruce Richardson 761*99a2dd95SBruce Richardson rte_bbdev_log_debug("Retrieved stats of device %u", dev_id); 762*99a2dd95SBruce Richardson return 0; 763*99a2dd95SBruce Richardson } 764*99a2dd95SBruce Richardson 765*99a2dd95SBruce Richardson int 766*99a2dd95SBruce Richardson rte_bbdev_stats_reset(uint16_t dev_id) 767*99a2dd95SBruce Richardson { 768*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 769*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 770*99a2dd95SBruce Richardson 771*99a2dd95SBruce Richardson VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); 772*99a2dd95SBruce Richardson 773*99a2dd95SBruce Richardson if (dev->dev_ops->stats_reset != NULL) 774*99a2dd95SBruce Richardson dev->dev_ops->stats_reset(dev); 775*99a2dd95SBruce Richardson else 776*99a2dd95SBruce Richardson reset_stats_in_queues(dev); 777*99a2dd95SBruce Richardson 778*99a2dd95SBruce Richardson rte_bbdev_log_debug("Reset stats of device %u", dev_id); 779*99a2dd95SBruce Richardson return 0; 780*99a2dd95SBruce Richardson } 781*99a2dd95SBruce Richardson 782*99a2dd95SBruce Richardson int 783*99a2dd95SBruce Richardson rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info) 784*99a2dd95SBruce Richardson { 785*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 786*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 787*99a2dd95SBruce Richardson 788*99a2dd95SBruce Richardson VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id); 789*99a2dd95SBruce Richardson 790*99a2dd95SBruce Richardson if (dev_info == NULL) { 791*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "NULL dev info structure"); 792*99a2dd95SBruce Richardson return -EINVAL; 793*99a2dd95SBruce Richardson } 794*99a2dd95SBruce Richardson 795*99a2dd95SBruce Richardson /* Copy data maintained by device interface layer */ 796*99a2dd95SBruce Richardson memset(dev_info, 0, sizeof(*dev_info)); 797*99a2dd95SBruce Richardson dev_info->dev_name = dev->data->name; 798*99a2dd95SBruce Richardson dev_info->num_queues = dev->data->num_queues; 799*99a2dd95SBruce Richardson dev_info->device = dev->device; 800*99a2dd95SBruce Richardson dev_info->socket_id = dev->data->socket_id; 801*99a2dd95SBruce Richardson dev_info->started = dev->data->started; 802*99a2dd95SBruce Richardson 803*99a2dd95SBruce Richardson /* Copy data maintained by device driver layer */ 804*99a2dd95SBruce Richardson dev->dev_ops->info_get(dev, &dev_info->drv); 805*99a2dd95SBruce Richardson 806*99a2dd95SBruce Richardson rte_bbdev_log_debug("Retrieved info of device %u", dev_id); 807*99a2dd95SBruce Richardson return 0; 808*99a2dd95SBruce Richardson } 809*99a2dd95SBruce Richardson 810*99a2dd95SBruce Richardson int 811*99a2dd95SBruce Richardson rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id, 812*99a2dd95SBruce Richardson struct rte_bbdev_queue_info *queue_info) 813*99a2dd95SBruce Richardson { 814*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 815*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 816*99a2dd95SBruce Richardson 817*99a2dd95SBruce Richardson VALID_QUEUE_OR_RET_ERR(queue_id, dev); 818*99a2dd95SBruce Richardson 819*99a2dd95SBruce Richardson if (queue_info == NULL) { 820*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "NULL queue info structure"); 821*99a2dd95SBruce Richardson return -EINVAL; 822*99a2dd95SBruce Richardson } 823*99a2dd95SBruce Richardson 824*99a2dd95SBruce Richardson /* Copy data to output */ 825*99a2dd95SBruce Richardson memset(queue_info, 0, sizeof(*queue_info)); 826*99a2dd95SBruce Richardson queue_info->conf = dev->data->queues[queue_id].conf; 827*99a2dd95SBruce Richardson queue_info->started = dev->data->queues[queue_id].started; 828*99a2dd95SBruce Richardson 829*99a2dd95SBruce Richardson rte_bbdev_log_debug("Retrieved info of queue %u of device %u", 830*99a2dd95SBruce Richardson queue_id, dev_id); 831*99a2dd95SBruce Richardson return 0; 832*99a2dd95SBruce Richardson } 833*99a2dd95SBruce Richardson 834*99a2dd95SBruce Richardson /* Calculate size needed to store bbdev_op, depending on type */ 835*99a2dd95SBruce Richardson static unsigned int 836*99a2dd95SBruce Richardson get_bbdev_op_size(enum rte_bbdev_op_type type) 837*99a2dd95SBruce Richardson { 838*99a2dd95SBruce Richardson unsigned int result = 0; 839*99a2dd95SBruce Richardson switch (type) { 840*99a2dd95SBruce Richardson case RTE_BBDEV_OP_NONE: 841*99a2dd95SBruce Richardson result = RTE_MAX(sizeof(struct rte_bbdev_dec_op), 842*99a2dd95SBruce Richardson sizeof(struct rte_bbdev_enc_op)); 843*99a2dd95SBruce Richardson break; 844*99a2dd95SBruce Richardson case RTE_BBDEV_OP_TURBO_DEC: 845*99a2dd95SBruce Richardson result = sizeof(struct rte_bbdev_dec_op); 846*99a2dd95SBruce Richardson break; 847*99a2dd95SBruce Richardson case RTE_BBDEV_OP_TURBO_ENC: 848*99a2dd95SBruce Richardson result = sizeof(struct rte_bbdev_enc_op); 849*99a2dd95SBruce Richardson break; 850*99a2dd95SBruce Richardson case RTE_BBDEV_OP_LDPC_DEC: 851*99a2dd95SBruce Richardson result = sizeof(struct rte_bbdev_dec_op); 852*99a2dd95SBruce Richardson break; 853*99a2dd95SBruce Richardson case RTE_BBDEV_OP_LDPC_ENC: 854*99a2dd95SBruce Richardson result = sizeof(struct rte_bbdev_enc_op); 855*99a2dd95SBruce Richardson break; 856*99a2dd95SBruce Richardson default: 857*99a2dd95SBruce Richardson break; 858*99a2dd95SBruce Richardson } 859*99a2dd95SBruce Richardson 860*99a2dd95SBruce Richardson return result; 861*99a2dd95SBruce Richardson } 862*99a2dd95SBruce Richardson 863*99a2dd95SBruce Richardson /* Initialise a bbdev_op structure */ 864*99a2dd95SBruce Richardson static void 865*99a2dd95SBruce Richardson bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element, 866*99a2dd95SBruce Richardson __rte_unused unsigned int n) 867*99a2dd95SBruce Richardson { 868*99a2dd95SBruce Richardson enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg; 869*99a2dd95SBruce Richardson 870*99a2dd95SBruce Richardson if (type == RTE_BBDEV_OP_TURBO_DEC || type == RTE_BBDEV_OP_LDPC_DEC) { 871*99a2dd95SBruce Richardson struct rte_bbdev_dec_op *op = element; 872*99a2dd95SBruce Richardson memset(op, 0, mempool->elt_size); 873*99a2dd95SBruce Richardson op->mempool = mempool; 874*99a2dd95SBruce Richardson } else if (type == RTE_BBDEV_OP_TURBO_ENC || 875*99a2dd95SBruce Richardson type == RTE_BBDEV_OP_LDPC_ENC) { 876*99a2dd95SBruce Richardson struct rte_bbdev_enc_op *op = element; 877*99a2dd95SBruce Richardson memset(op, 0, mempool->elt_size); 878*99a2dd95SBruce Richardson op->mempool = mempool; 879*99a2dd95SBruce Richardson } 880*99a2dd95SBruce Richardson } 881*99a2dd95SBruce Richardson 882*99a2dd95SBruce Richardson struct rte_mempool * 883*99a2dd95SBruce Richardson rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type, 884*99a2dd95SBruce Richardson unsigned int num_elements, unsigned int cache_size, 885*99a2dd95SBruce Richardson int socket_id) 886*99a2dd95SBruce Richardson { 887*99a2dd95SBruce Richardson struct rte_bbdev_op_pool_private *priv; 888*99a2dd95SBruce Richardson struct rte_mempool *mp; 889*99a2dd95SBruce Richardson const char *op_type_str; 890*99a2dd95SBruce Richardson 891*99a2dd95SBruce Richardson if (name == NULL) { 892*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "NULL name for op pool"); 893*99a2dd95SBruce Richardson return NULL; 894*99a2dd95SBruce Richardson } 895*99a2dd95SBruce Richardson 896*99a2dd95SBruce Richardson if (type >= RTE_BBDEV_OP_TYPE_COUNT) { 897*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 898*99a2dd95SBruce Richardson "Invalid op type (%u), should be less than %u", 899*99a2dd95SBruce Richardson type, RTE_BBDEV_OP_TYPE_COUNT); 900*99a2dd95SBruce Richardson return NULL; 901*99a2dd95SBruce Richardson } 902*99a2dd95SBruce Richardson 903*99a2dd95SBruce Richardson mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type), 904*99a2dd95SBruce Richardson cache_size, sizeof(struct rte_bbdev_op_pool_private), 905*99a2dd95SBruce Richardson NULL, NULL, bbdev_op_init, &type, socket_id, 0); 906*99a2dd95SBruce Richardson if (mp == NULL) { 907*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 908*99a2dd95SBruce Richardson "Failed to create op pool %s (num ops=%u, op size=%u) with error: %s", 909*99a2dd95SBruce Richardson name, num_elements, get_bbdev_op_size(type), 910*99a2dd95SBruce Richardson rte_strerror(rte_errno)); 911*99a2dd95SBruce Richardson return NULL; 912*99a2dd95SBruce Richardson } 913*99a2dd95SBruce Richardson 914*99a2dd95SBruce Richardson op_type_str = rte_bbdev_op_type_str(type); 915*99a2dd95SBruce Richardson if (op_type_str == NULL) 916*99a2dd95SBruce Richardson return NULL; 917*99a2dd95SBruce Richardson 918*99a2dd95SBruce Richardson rte_bbdev_log_debug( 919*99a2dd95SBruce Richardson "Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)", 920*99a2dd95SBruce Richardson name, num_elements, op_type_str, cache_size, socket_id, 921*99a2dd95SBruce Richardson get_bbdev_op_size(type)); 922*99a2dd95SBruce Richardson 923*99a2dd95SBruce Richardson priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp); 924*99a2dd95SBruce Richardson priv->type = type; 925*99a2dd95SBruce Richardson 926*99a2dd95SBruce Richardson return mp; 927*99a2dd95SBruce Richardson } 928*99a2dd95SBruce Richardson 929*99a2dd95SBruce Richardson int 930*99a2dd95SBruce Richardson rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event, 931*99a2dd95SBruce Richardson rte_bbdev_cb_fn cb_fn, void *cb_arg) 932*99a2dd95SBruce Richardson { 933*99a2dd95SBruce Richardson struct rte_bbdev_callback *user_cb; 934*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 935*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 936*99a2dd95SBruce Richardson 937*99a2dd95SBruce Richardson if (event >= RTE_BBDEV_EVENT_MAX) { 938*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 939*99a2dd95SBruce Richardson "Invalid event type (%u), should be less than %u", 940*99a2dd95SBruce Richardson event, RTE_BBDEV_EVENT_MAX); 941*99a2dd95SBruce Richardson return -EINVAL; 942*99a2dd95SBruce Richardson } 943*99a2dd95SBruce Richardson 944*99a2dd95SBruce Richardson if (cb_fn == NULL) { 945*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "NULL callback function"); 946*99a2dd95SBruce Richardson return -EINVAL; 947*99a2dd95SBruce Richardson } 948*99a2dd95SBruce Richardson 949*99a2dd95SBruce Richardson rte_spinlock_lock(&rte_bbdev_cb_lock); 950*99a2dd95SBruce Richardson 951*99a2dd95SBruce Richardson TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) { 952*99a2dd95SBruce Richardson if (user_cb->cb_fn == cb_fn && 953*99a2dd95SBruce Richardson user_cb->cb_arg == cb_arg && 954*99a2dd95SBruce Richardson user_cb->event == event) 955*99a2dd95SBruce Richardson break; 956*99a2dd95SBruce Richardson } 957*99a2dd95SBruce Richardson 958*99a2dd95SBruce Richardson /* create a new callback. */ 959*99a2dd95SBruce Richardson if (user_cb == NULL) { 960*99a2dd95SBruce Richardson user_cb = rte_zmalloc("INTR_USER_CALLBACK", 961*99a2dd95SBruce Richardson sizeof(struct rte_bbdev_callback), 0); 962*99a2dd95SBruce Richardson if (user_cb != NULL) { 963*99a2dd95SBruce Richardson user_cb->cb_fn = cb_fn; 964*99a2dd95SBruce Richardson user_cb->cb_arg = cb_arg; 965*99a2dd95SBruce Richardson user_cb->event = event; 966*99a2dd95SBruce Richardson TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next); 967*99a2dd95SBruce Richardson } 968*99a2dd95SBruce Richardson } 969*99a2dd95SBruce Richardson 970*99a2dd95SBruce Richardson rte_spinlock_unlock(&rte_bbdev_cb_lock); 971*99a2dd95SBruce Richardson return (user_cb == NULL) ? -ENOMEM : 0; 972*99a2dd95SBruce Richardson } 973*99a2dd95SBruce Richardson 974*99a2dd95SBruce Richardson int 975*99a2dd95SBruce Richardson rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event, 976*99a2dd95SBruce Richardson rte_bbdev_cb_fn cb_fn, void *cb_arg) 977*99a2dd95SBruce Richardson { 978*99a2dd95SBruce Richardson int ret = 0; 979*99a2dd95SBruce Richardson struct rte_bbdev_callback *cb, *next; 980*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 981*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 982*99a2dd95SBruce Richardson 983*99a2dd95SBruce Richardson if (event >= RTE_BBDEV_EVENT_MAX) { 984*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 985*99a2dd95SBruce Richardson "Invalid event type (%u), should be less than %u", 986*99a2dd95SBruce Richardson event, RTE_BBDEV_EVENT_MAX); 987*99a2dd95SBruce Richardson return -EINVAL; 988*99a2dd95SBruce Richardson } 989*99a2dd95SBruce Richardson 990*99a2dd95SBruce Richardson if (cb_fn == NULL) { 991*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 992*99a2dd95SBruce Richardson "NULL callback function cannot be unregistered"); 993*99a2dd95SBruce Richardson return -EINVAL; 994*99a2dd95SBruce Richardson } 995*99a2dd95SBruce Richardson 996*99a2dd95SBruce Richardson dev = &rte_bbdev_devices[dev_id]; 997*99a2dd95SBruce Richardson rte_spinlock_lock(&rte_bbdev_cb_lock); 998*99a2dd95SBruce Richardson 999*99a2dd95SBruce Richardson for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) { 1000*99a2dd95SBruce Richardson 1001*99a2dd95SBruce Richardson next = TAILQ_NEXT(cb, next); 1002*99a2dd95SBruce Richardson 1003*99a2dd95SBruce Richardson if (cb->cb_fn != cb_fn || cb->event != event || 1004*99a2dd95SBruce Richardson (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 1005*99a2dd95SBruce Richardson continue; 1006*99a2dd95SBruce Richardson 1007*99a2dd95SBruce Richardson /* If this callback is not executing right now, remove it. */ 1008*99a2dd95SBruce Richardson if (cb->active == 0) { 1009*99a2dd95SBruce Richardson TAILQ_REMOVE(&(dev->list_cbs), cb, next); 1010*99a2dd95SBruce Richardson rte_free(cb); 1011*99a2dd95SBruce Richardson } else 1012*99a2dd95SBruce Richardson ret = -EAGAIN; 1013*99a2dd95SBruce Richardson } 1014*99a2dd95SBruce Richardson 1015*99a2dd95SBruce Richardson rte_spinlock_unlock(&rte_bbdev_cb_lock); 1016*99a2dd95SBruce Richardson return ret; 1017*99a2dd95SBruce Richardson } 1018*99a2dd95SBruce Richardson 1019*99a2dd95SBruce Richardson void 1020*99a2dd95SBruce Richardson rte_bbdev_pmd_callback_process(struct rte_bbdev *dev, 1021*99a2dd95SBruce Richardson enum rte_bbdev_event_type event, void *ret_param) 1022*99a2dd95SBruce Richardson { 1023*99a2dd95SBruce Richardson struct rte_bbdev_callback *cb_lst; 1024*99a2dd95SBruce Richardson struct rte_bbdev_callback dev_cb; 1025*99a2dd95SBruce Richardson 1026*99a2dd95SBruce Richardson if (dev == NULL) { 1027*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "NULL device"); 1028*99a2dd95SBruce Richardson return; 1029*99a2dd95SBruce Richardson } 1030*99a2dd95SBruce Richardson 1031*99a2dd95SBruce Richardson if (dev->data == NULL) { 1032*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "NULL data structure"); 1033*99a2dd95SBruce Richardson return; 1034*99a2dd95SBruce Richardson } 1035*99a2dd95SBruce Richardson 1036*99a2dd95SBruce Richardson if (event >= RTE_BBDEV_EVENT_MAX) { 1037*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 1038*99a2dd95SBruce Richardson "Invalid event type (%u), should be less than %u", 1039*99a2dd95SBruce Richardson event, RTE_BBDEV_EVENT_MAX); 1040*99a2dd95SBruce Richardson return; 1041*99a2dd95SBruce Richardson } 1042*99a2dd95SBruce Richardson 1043*99a2dd95SBruce Richardson rte_spinlock_lock(&rte_bbdev_cb_lock); 1044*99a2dd95SBruce Richardson TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) { 1045*99a2dd95SBruce Richardson if (cb_lst->cb_fn == NULL || cb_lst->event != event) 1046*99a2dd95SBruce Richardson continue; 1047*99a2dd95SBruce Richardson dev_cb = *cb_lst; 1048*99a2dd95SBruce Richardson cb_lst->active = 1; 1049*99a2dd95SBruce Richardson if (ret_param != NULL) 1050*99a2dd95SBruce Richardson dev_cb.ret_param = ret_param; 1051*99a2dd95SBruce Richardson 1052*99a2dd95SBruce Richardson rte_spinlock_unlock(&rte_bbdev_cb_lock); 1053*99a2dd95SBruce Richardson dev_cb.cb_fn(dev->data->dev_id, dev_cb.event, 1054*99a2dd95SBruce Richardson dev_cb.cb_arg, dev_cb.ret_param); 1055*99a2dd95SBruce Richardson rte_spinlock_lock(&rte_bbdev_cb_lock); 1056*99a2dd95SBruce Richardson cb_lst->active = 0; 1057*99a2dd95SBruce Richardson } 1058*99a2dd95SBruce Richardson rte_spinlock_unlock(&rte_bbdev_cb_lock); 1059*99a2dd95SBruce Richardson } 1060*99a2dd95SBruce Richardson 1061*99a2dd95SBruce Richardson int 1062*99a2dd95SBruce Richardson rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id) 1063*99a2dd95SBruce Richardson { 1064*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 1065*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 1066*99a2dd95SBruce Richardson VALID_QUEUE_OR_RET_ERR(queue_id, dev); 1067*99a2dd95SBruce Richardson VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); 1068*99a2dd95SBruce Richardson VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id); 1069*99a2dd95SBruce Richardson return dev->dev_ops->queue_intr_enable(dev, queue_id); 1070*99a2dd95SBruce Richardson } 1071*99a2dd95SBruce Richardson 1072*99a2dd95SBruce Richardson int 1073*99a2dd95SBruce Richardson rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id) 1074*99a2dd95SBruce Richardson { 1075*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 1076*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 1077*99a2dd95SBruce Richardson VALID_QUEUE_OR_RET_ERR(queue_id, dev); 1078*99a2dd95SBruce Richardson VALID_DEV_OPS_OR_RET_ERR(dev, dev_id); 1079*99a2dd95SBruce Richardson VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id); 1080*99a2dd95SBruce Richardson return dev->dev_ops->queue_intr_disable(dev, queue_id); 1081*99a2dd95SBruce Richardson } 1082*99a2dd95SBruce Richardson 1083*99a2dd95SBruce Richardson int 1084*99a2dd95SBruce Richardson rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op, 1085*99a2dd95SBruce Richardson void *data) 1086*99a2dd95SBruce Richardson { 1087*99a2dd95SBruce Richardson uint32_t vec; 1088*99a2dd95SBruce Richardson struct rte_bbdev *dev = get_dev(dev_id); 1089*99a2dd95SBruce Richardson struct rte_intr_handle *intr_handle; 1090*99a2dd95SBruce Richardson int ret; 1091*99a2dd95SBruce Richardson 1092*99a2dd95SBruce Richardson VALID_DEV_OR_RET_ERR(dev, dev_id); 1093*99a2dd95SBruce Richardson VALID_QUEUE_OR_RET_ERR(queue_id, dev); 1094*99a2dd95SBruce Richardson 1095*99a2dd95SBruce Richardson intr_handle = dev->intr_handle; 1096*99a2dd95SBruce Richardson if (!intr_handle || !intr_handle->intr_vec) { 1097*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Device %u intr handle unset\n", dev_id); 1098*99a2dd95SBruce Richardson return -ENOTSUP; 1099*99a2dd95SBruce Richardson } 1100*99a2dd95SBruce Richardson 1101*99a2dd95SBruce Richardson if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) { 1102*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Device %u queue_id %u is too big\n", 1103*99a2dd95SBruce Richardson dev_id, queue_id); 1104*99a2dd95SBruce Richardson return -ENOTSUP; 1105*99a2dd95SBruce Richardson } 1106*99a2dd95SBruce Richardson 1107*99a2dd95SBruce Richardson vec = intr_handle->intr_vec[queue_id]; 1108*99a2dd95SBruce Richardson ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 1109*99a2dd95SBruce Richardson if (ret && (ret != -EEXIST)) { 1110*99a2dd95SBruce Richardson rte_bbdev_log(ERR, 1111*99a2dd95SBruce Richardson "dev %u q %u int ctl error op %d epfd %d vec %u\n", 1112*99a2dd95SBruce Richardson dev_id, queue_id, op, epfd, vec); 1113*99a2dd95SBruce Richardson return ret; 1114*99a2dd95SBruce Richardson } 1115*99a2dd95SBruce Richardson 1116*99a2dd95SBruce Richardson return 0; 1117*99a2dd95SBruce Richardson } 1118*99a2dd95SBruce Richardson 1119*99a2dd95SBruce Richardson 1120*99a2dd95SBruce Richardson const char * 1121*99a2dd95SBruce Richardson rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type) 1122*99a2dd95SBruce Richardson { 1123*99a2dd95SBruce Richardson static const char * const op_types[] = { 1124*99a2dd95SBruce Richardson "RTE_BBDEV_OP_NONE", 1125*99a2dd95SBruce Richardson "RTE_BBDEV_OP_TURBO_DEC", 1126*99a2dd95SBruce Richardson "RTE_BBDEV_OP_TURBO_ENC", 1127*99a2dd95SBruce Richardson "RTE_BBDEV_OP_LDPC_DEC", 1128*99a2dd95SBruce Richardson "RTE_BBDEV_OP_LDPC_ENC", 1129*99a2dd95SBruce Richardson }; 1130*99a2dd95SBruce Richardson 1131*99a2dd95SBruce Richardson if (op_type < RTE_BBDEV_OP_TYPE_COUNT) 1132*99a2dd95SBruce Richardson return op_types[op_type]; 1133*99a2dd95SBruce Richardson 1134*99a2dd95SBruce Richardson rte_bbdev_log(ERR, "Invalid operation type"); 1135*99a2dd95SBruce Richardson return NULL; 1136*99a2dd95SBruce Richardson } 1137