/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_kvargs.h>

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>

#define DRIVER_NAME baseband_null

RTE_LOG_REGISTER_DEFAULT(bbdev_null_logtype, NOTICE);

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_null_logtype, fmt "\n", ##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Initialisation params structure that can be used by null BBDEV driver */
struct bbdev_null_params {
	int socket_id; /**< Null BBDEV socket */
	uint16_t queues_num; /**< Null BBDEV queues number */
};

/* Acceptable params for null BBDEV devices */
#define BBDEV_NULL_MAX_NB_QUEUES_ARG "max_nb_queues"
#define BBDEV_NULL_SOCKET_ID_ARG "socket_id"

static const char * const bbdev_null_valid_params[] = {
	BBDEV_NULL_MAX_NB_QUEUES_ARG,
	BBDEV_NULL_SOCKET_ID_ARG
};

/* private data structure */
struct bbdev_private {
	unsigned int max_nb_queues; /**< Max number of queues */
};

/* queue */
struct bbdev_queue {
	struct rte_ring *processed_pkts; /* Ring for processed packets */
} __rte_cache_aligned;

/* Get device info */
static void
info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
{
	struct bbdev_private *internals = dev->data->dev_private;

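	/*
	 * The null device advertises no op capabilities: it performs no
	 * baseband processing, so the list below is intentionally empty.
	 */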
	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
		RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
	};

	static struct rte_bbdev_queue_conf default_queue_conf = {
		.queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
	};

	default_queue_conf.socket = dev->data->socket_id;

	dev_info->driver_name = RTE_STR(DRIVER_NAME);
	dev_info->max_num_queues = internals->max_nb_queues;
	dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
	dev_info->hardware_accelerated = false;
	dev_info->max_dl_queue_priority = 0;
	dev_info->max_ul_queue_priority = 0;
	dev_info->default_queue_conf = default_queue_conf;
	dev_info->capabilities = bbdev_capabilities;
	dev_info->cpu_flag_reqs = NULL;
	dev_info->min_alignment = 0;

	/* BBDEV null device does not process the data, so
	 * endianness setting is not relevant, but setting it
	 * here for code completeness.
	 */
	dev_info->data_endianness = RTE_LITTLE_ENDIAN;

	rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
}

/* Release queue */
static int
q_release(struct rte_bbdev *dev, uint16_t q_id)
{
	struct bbdev_queue *q = dev->data->queues[q_id].queue_private;

	if (q != NULL) {
		rte_ring_free(q->processed_pkts);
		rte_free(q);
		dev->data->queues[q_id].queue_private = NULL;
	}

	rte_bbdev_log_debug("released device queue %u:%u",
			dev->data->dev_id, q_id);
	return 0;
}

/* Setup a queue */
static int
q_setup(struct rte_bbdev *dev, uint16_t q_id,
		const struct rte_bbdev_queue_conf *queue_conf)
{
	struct bbdev_queue *q;
	char ring_name[RTE_RING_NAMESIZE];
	snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) "%u:%u",
			dev->data->dev_id, q_id);

	/* Allocate the queue data structure. */
	q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate queue memory");
		return -ENOMEM;
	}

	q->processed_pkts = rte_ring_create(ring_name, queue_conf->queue_size,
			queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (q->processed_pkts == NULL) {
		rte_bbdev_log(ERR, "Failed to create ring");
		goto free_q;
	}

	dev->data->queues[q_id].queue_private = q;
	rte_bbdev_log_debug("setup device queue %s", ring_name);
	return 0;

free_q:
	rte_free(q);
	return -EFAULT;
}

static const struct rte_bbdev_ops pmd_ops = {
	.info_get = info_get,
	.queue_setup = q_setup,
	.queue_release = q_release
};

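/*
 * Data path: the null driver does not touch the operations. Enqueue pushes
 * the op pointers onto the per-queue ring and dequeue pops them back, so
 * every op completes unchanged.
 */
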
/* Enqueue decode burst */
static uint16_t
enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	struct bbdev_queue *q = q_data->queue_private;
	uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Enqueue encode burst */
static uint16_t
enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	struct bbdev_queue *q = q_data->queue_private;
	uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Dequeue decode burst */
static uint16_t
dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	struct bbdev_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

/* Dequeue encode burst */
static uint16_t
dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	struct bbdev_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

/* Parse 16bit integer from string argument */
static inline int
parse_u16_arg(const char *key, const char *value, void *extra_args)
{
	uint16_t *u16 = extra_args;
	unsigned long result;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;
	errno = 0;
	result = strtoul(value, NULL, 0);
	if ((result >= (1 << 16)) || (errno != 0)) {
		rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
		return -ERANGE;
	}
	*u16 = (uint16_t)result;
	return 0;
}

/* Parse parameters used to create device */
static int
parse_bbdev_null_params(struct bbdev_null_params *params,
		const char *input_args)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;
	if (input_args) {
		kvlist = rte_kvargs_parse(input_args, bbdev_null_valid_params);
		if (kvlist == NULL)
			return -EFAULT;

		ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[0],
				&parse_u16_arg, &params->queues_num);
		if (ret < 0)
			goto exit;

		ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[1],
				&parse_u16_arg, &params->socket_id);
		if (ret < 0)
			goto exit;

		if (params->socket_id >= RTE_MAX_NUMA_NODES) {
			rte_bbdev_log(ERR, "Invalid socket, must be < %u",
					RTE_MAX_NUMA_NODES);
			ret = -EINVAL;
			goto exit;
		}
	}

exit:
	rte_kvargs_free(kvlist);
	return ret;
}

/* Create device */
static int
null_bbdev_create(struct rte_vdev_device *vdev,
		struct bbdev_null_params *init_params)
{
	struct rte_bbdev *bbdev;
	const char *name = rte_vdev_device_name(vdev);

	bbdev = rte_bbdev_allocate(name);
	if (bbdev == NULL)
		return -ENODEV;

	bbdev->data->dev_private = rte_zmalloc_socket(name,
			sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
			init_params->socket_id);
	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_release(bbdev);
		return -ENOMEM;
	}

	bbdev->dev_ops = &pmd_ops;
	bbdev->device = &vdev->device;
	bbdev->data->socket_id = init_params->socket_id;
	bbdev->intr_handle = NULL;

	/* register rx/tx burst functions for data path */
	bbdev->dequeue_enc_ops = dequeue_enc_ops;
	bbdev->dequeue_dec_ops = dequeue_dec_ops;
	bbdev->enqueue_enc_ops = enqueue_enc_ops;
	bbdev->enqueue_dec_ops = enqueue_dec_ops;
	((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
			init_params->queues_num;

	return 0;
}

/* Initialise device */
static int
null_bbdev_probe(struct rte_vdev_device *vdev)
{
	struct bbdev_null_params init_params = {
		rte_socket_id(),
		RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
	};
	const char *name;
	const char *input_args;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	input_args = rte_vdev_device_args(vdev);
	parse_bbdev_null_params(&init_params, input_args);

	rte_bbdev_log_debug("Init %s on NUMA node %d with max queues: %d",
			name, init_params.socket_id, init_params.queues_num);

	return null_bbdev_create(vdev, &init_params);
}

/* Uninitialise device */
static int
null_bbdev_remove(struct rte_vdev_device *vdev)
{
	struct rte_bbdev *bbdev;
	const char *name;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	bbdev = rte_bbdev_get_named_dev(name);
	if (bbdev == NULL)
		return -EINVAL;

	rte_free(bbdev->data->dev_private);

	return rte_bbdev_release(bbdev);
}

static struct rte_vdev_driver bbdev_null_pmd_drv = {
	.probe = null_bbdev_probe,
	.remove = null_bbdev_remove
};

RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_null_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
	BBDEV_NULL_MAX_NB_QUEUES_ARG"=<int> "
	BBDEV_NULL_SOCKET_ID_ARG"=<int>");
RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, bbdev_null);
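
/*
 * Example (illustrative, not part of the driver): the device is created as a
 * virtual device through the EAL --vdev option, e.g.
 *
 *   --vdev="baseband_null0,max_nb_queues=8,socket_id=0"
 *
 * Both arguments are optional; null_bbdev_probe() defaults to
 * RTE_BBDEV_DEFAULT_MAX_NB_QUEUES queues and the socket of the calling lcore.
 */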