1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2022 Intel Corporation
3 */
4
5 #include <rte_string_fns.h>
6 #include <rte_devargs.h>
7 #include <ctype.h>
8
9 #include "qat_device.h"
10 #include "adf_transport_access_macros.h"
11 #include "qat_sym.h"
12 #include "qat_comp_pmd.h"
13 #include "adf_pf2vf_msg.h"
14 #include "qat_pf2vf.h"
15
/* Hardware device information per generation */
struct qat_gen_hw_data qat_gen_config[QAT_N_GENS];
/* Generation-specific hardware ops, filled in by each gen's module */
struct qat_dev_hw_spec_funcs *qat_dev_hw_spec[QAT_N_GENS];

/* per-process array of device data; slot is in use iff .mz != NULL */
struct qat_device_info qat_pci_devs[RTE_PMD_QAT_MAX_PCI_DEVICES];
/* per-process count of attached QAT devices */
static int qat_nb_pci_devices;
23
/*
 * The set of PCI devices this driver supports.
 * The device-id to generation mapping below mirrors the switch in
 * qat_pci_device_allocate().
 */

static const struct rte_pci_id pci_id_qat_map[] = {
		{
			/* DH895xCC - GEN1 */
			RTE_PCI_DEVICE(0x8086, 0x0443),
		},
		{
			/* GEN2 */
			RTE_PCI_DEVICE(0x8086, 0x37c9),
		},
		{
			/* GEN2 */
			RTE_PCI_DEVICE(0x8086, 0x19e3),
		},
		{
			/* GEN2 */
			RTE_PCI_DEVICE(0x8086, 0x6f55),
		},
		{
			/* GEN2 */
			RTE_PCI_DEVICE(0x8086, 0x18ef),
		},
		{
			/* GEN3 */
			RTE_PCI_DEVICE(0x8086, 0x18a1),
		},
		{
			/* GEN4 */
			RTE_PCI_DEVICE(0x8086, 0x4941),
		},
		{
			/* GEN4 */
			RTE_PCI_DEVICE(0x8086, 0x4943),
		},
		{.device_id = 0},	/* sentinel */
};
55
56 static int
qat_pci_get_extra_size(enum qat_device_gen qat_dev_gen)57 qat_pci_get_extra_size(enum qat_device_gen qat_dev_gen)
58 {
59 struct qat_dev_hw_spec_funcs *ops_hw =
60 qat_dev_hw_spec[qat_dev_gen];
61 RTE_FUNC_PTR_OR_ERR_RET(ops_hw->qat_dev_get_extra_size,
62 -ENOTSUP);
63 return ops_hw->qat_dev_get_extra_size();
64 }
65
66 static struct qat_pci_device *
qat_pci_get_named_dev(const char * name)67 qat_pci_get_named_dev(const char *name)
68 {
69 unsigned int i;
70
71 if (name == NULL)
72 return NULL;
73
74 for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) {
75 if (qat_pci_devs[i].mz &&
76 (strcmp(((struct qat_pci_device *)
77 qat_pci_devs[i].mz->addr)->name, name)
78 == 0))
79 return (struct qat_pci_device *)
80 qat_pci_devs[i].mz->addr;
81 }
82
83 return NULL;
84 }
85
86 static uint8_t
qat_pci_find_free_device_index(void)87 qat_pci_find_free_device_index(void)
88 {
89 uint8_t dev_id;
90
91 for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES;
92 dev_id++) {
93 if (qat_pci_devs[dev_id].mz == NULL)
94 break;
95 }
96 return dev_id;
97 }
98
99 struct qat_pci_device *
qat_get_qat_dev_from_pci_dev(struct rte_pci_device * pci_dev)100 qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev)
101 {
102 char name[QAT_DEV_NAME_MAX_LEN];
103
104 rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
105
106 return qat_pci_get_named_dev(name);
107 }
108
109 static void
qat_dev_parse_cmd(const char * str,struct qat_dev_cmd_param * qat_dev_cmd_param)110 qat_dev_parse_cmd(const char *str, struct qat_dev_cmd_param
111 *qat_dev_cmd_param)
112 {
113 int i = 0;
114 const char *param;
115
116 while (1) {
117 char value_str[4] = { };
118
119 param = qat_dev_cmd_param[i].name;
120 if (param == NULL)
121 return;
122 long value = 0;
123 const char *arg = strstr(str, param);
124 const char *arg2 = NULL;
125
126 if (arg) {
127 arg2 = arg + strlen(param);
128 if (*arg2 != '=') {
129 QAT_LOG(DEBUG, "parsing error '=' sign"
130 " should immediately follow %s",
131 param);
132 arg2 = NULL;
133 } else
134 arg2++;
135 } else {
136 QAT_LOG(DEBUG, "%s not provided", param);
137 }
138 if (arg2) {
139 int iter = 0;
140 while (iter < 2) {
141 if (!isdigit(*(arg2 + iter)))
142 break;
143 iter++;
144 }
145 if (!iter) {
146 QAT_LOG(DEBUG, "parsing error %s"
147 " no number provided",
148 param);
149 } else {
150 memcpy(value_str, arg2, iter);
151 value = strtol(value_str, NULL, 10);
152 if (value > MAX_QP_THRESHOLD_SIZE) {
153 QAT_LOG(DEBUG, "Exceeded max size of"
154 " threshold, setting to %d",
155 MAX_QP_THRESHOLD_SIZE);
156 value = MAX_QP_THRESHOLD_SIZE;
157 }
158 QAT_LOG(DEBUG, "parsing %s = %ld",
159 param, value);
160 }
161 }
162 qat_dev_cmd_param[i].val = value;
163 i++;
164 }
165 }
166
/*
 * Allocate (or, in a secondary process, attach to) the qat_pci_device
 * for the given PCI device.
 *
 * Primary process: determines the hardware generation from the PCI
 * device id, reserves a memzone named "<pci-name>_qat" holding
 * struct qat_pci_device plus generation-specific private data, fills in
 * the device fields, parses devargs into qat_dev_cmd_param, reads the
 * ring/qp configuration and finally publishes the device by storing the
 * memzone in qat_pci_devs[]. On any failure the memzone is freed and
 * NULL is returned.
 *
 * Secondary process: looks up the memzone created by the primary and
 * attaches to it; fails (NULL) if the primary has not created it.
 *
 * Returns the device pointer, or NULL on error.
 */
struct qat_pci_device *
qat_pci_device_allocate(struct rte_pci_device *pci_dev,
		struct qat_dev_cmd_param *qat_dev_cmd_param)
{
	struct qat_pci_device *qat_dev;
	enum qat_device_gen qat_dev_gen;
	uint8_t qat_dev_id = 0;
	char name[QAT_DEV_NAME_MAX_LEN];
	struct rte_devargs *devargs = pci_dev->device.devargs;
	struct qat_dev_hw_spec_funcs *ops_hw;
	struct rte_mem_resource *mem_resource;
	const struct rte_memzone *qat_dev_mz;
	int qat_dev_size, extra_size;

	/* Device/memzone name is the canonical PCI name + "_qat" suffix */
	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
	snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");

	/* Map PCI device id to hardware generation (see pci_id_qat_map) */
	switch (pci_dev->id.device_id) {
	case 0x0443:
		qat_dev_gen = QAT_GEN1;
		break;
	case 0x37c9:
	case 0x19e3:
	case 0x6f55:
	case 0x18ef:
		qat_dev_gen = QAT_GEN2;
		break;
	case 0x18a1:
		qat_dev_gen = QAT_GEN3;
		break;
	case 0x4941:
	case 0x4943:
		qat_dev_gen = QAT_GEN4;
		break;
	default:
		QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
		return NULL;
	}

	/* Secondary process: attach to the memzone the primary created */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const struct rte_memzone *mz = rte_memzone_lookup(name);

		if (mz == NULL) {
			QAT_LOG(ERR,
				"Secondary can't find %s mz, did primary create device?",
				name);
			return NULL;
		}
		qat_dev = mz->addr;
		qat_pci_devs[qat_dev->qat_dev_id].mz = mz;
		qat_pci_devs[qat_dev->qat_dev_id].pci_dev = pci_dev;
		qat_nb_pci_devices++;
		QAT_LOG(DEBUG, "QAT device %d found, name %s, total QATs %d",
			qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices);
		return qat_dev;
	}

	/* Primary process: refuse duplicate allocation for the same name */
	if (qat_pci_get_named_dev(name) != NULL) {
		QAT_LOG(ERR, "QAT device with name %s already allocated!",
				name);
		return NULL;
	}

	qat_dev_id = qat_pci_find_free_device_index();
	if (qat_dev_id == RTE_PMD_QAT_MAX_PCI_DEVICES) {
		QAT_LOG(ERR, "Reached maximum number of QAT devices");
		return NULL;
	}

	/* Generation-specific private data is appended to the struct */
	extra_size = qat_pci_get_extra_size(qat_dev_gen);
	if (extra_size < 0) {
		QAT_LOG(ERR, "QAT internal error: no pci pointer for gen %d",
			qat_dev_gen);
		return NULL;
	}

	qat_dev_size = sizeof(struct qat_pci_device) + extra_size;
	qat_dev_mz = rte_memzone_reserve(name, qat_dev_size,
		rte_socket_id(), 0);

	if (qat_dev_mz == NULL) {
		QAT_LOG(ERR, "Error when allocating memzone for QAT_%d",
			qat_dev_id);
		return NULL;
	}

	qat_dev = qat_dev_mz->addr;
	memset(qat_dev, 0, qat_dev_size);
	/* dev_private points just past the base struct, into extra_size */
	qat_dev->dev_private = qat_dev + 1;
	strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN);
	qat_dev->qat_dev_id = qat_dev_id;
	/* NOTE(review): pci_dev is recorded here, before the error paths
	 * below; on failure the memzone is freed but this field is left
	 * set until qat_pci_device_release() memsets the slot - confirm
	 * nothing reads it for a slot whose .mz is still NULL.
	 */
	qat_pci_devs[qat_dev_id].pci_dev = pci_dev;
	qat_dev->qat_dev_gen = qat_dev_gen;

	/* Optional misc BAR (used e.g. for PF-to-VF communication) */
	ops_hw = qat_dev_hw_spec[qat_dev->qat_dev_gen];
	if (ops_hw->qat_dev_get_misc_bar == NULL) {
		QAT_LOG(ERR, "qat_dev_get_misc_bar function pointer not set");
		rte_memzone_free(qat_dev_mz);
		return NULL;
	}
	if (ops_hw->qat_dev_get_misc_bar(&mem_resource, pci_dev) == 0) {
		if (mem_resource->addr == NULL) {
			QAT_LOG(ERR, "QAT cannot get access to VF misc bar");
			rte_memzone_free(qat_dev_mz);
			return NULL;
		}
		qat_dev->misc_bar_io_addr = mem_resource->addr;
	} else
		qat_dev->misc_bar_io_addr = NULL;

	/* Parse optional per-service threshold devargs */
	if (devargs && devargs->drv_str)
		qat_dev_parse_cmd(devargs->drv_str, qat_dev_cmd_param);

	if (qat_read_qp_config(qat_dev)) {
		QAT_LOG(ERR,
			"Cannot acquire ring configuration for QAT_%d",
			qat_dev_id);
		rte_memzone_free(qat_dev_mz);
		return NULL;
	}

	/* No errors when allocating, attach memzone with
	 * qat_dev to list of devices
	 */
	qat_pci_devs[qat_dev_id].mz = qat_dev_mz;

	rte_spinlock_init(&qat_dev->arb_csr_lock);
	qat_nb_pci_devices++;

	QAT_LOG(DEBUG, "QAT device %d found, name %s, total QATs %d",
			qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices);

	return qat_dev;
}
301
302 static int
qat_pci_device_release(struct rte_pci_device * pci_dev)303 qat_pci_device_release(struct rte_pci_device *pci_dev)
304 {
305 struct qat_pci_device *qat_dev;
306 char name[QAT_DEV_NAME_MAX_LEN];
307 int busy = 0;
308
309 if (pci_dev == NULL)
310 return -EINVAL;
311
312 rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
313 snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
314 qat_dev = qat_pci_get_named_dev(name);
315 if (qat_dev != NULL) {
316
317 struct qat_device_info *inst =
318 &qat_pci_devs[qat_dev->qat_dev_id];
319 /* Check that there are no service devs still on pci device */
320
321 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
322 if (qat_dev->sym_dev != NULL) {
323 QAT_LOG(DEBUG, "QAT sym device %s is busy",
324 name);
325 busy = 1;
326 }
327 if (qat_dev->asym_dev != NULL) {
328 QAT_LOG(DEBUG, "QAT asym device %s is busy",
329 name);
330 busy = 1;
331 }
332 if (qat_dev->comp_dev != NULL) {
333 QAT_LOG(DEBUG, "QAT comp device %s is busy",
334 name);
335 busy = 1;
336 }
337 if (busy)
338 return -EBUSY;
339 rte_memzone_free(inst->mz);
340 }
341 memset(inst, 0, sizeof(struct qat_device_info));
342 qat_nb_pci_devices--;
343 QAT_LOG(DEBUG, "QAT device %s released, total QATs %d",
344 name, qat_nb_pci_devices);
345 }
346 return 0;
347 }
348
/*
 * Tear down every service PMD built on top of the QAT device, then
 * release the device itself.
 * The service destroys must run first: qat_pci_device_release()
 * returns -EBUSY while any of sym_dev/asym_dev/comp_dev is still set.
 * Returns the result of qat_pci_device_release().
 */
static int
qat_pci_dev_destroy(struct qat_pci_device *qat_pci_dev,
		struct rte_pci_device *pci_dev)
{
	qat_sym_dev_destroy(qat_pci_dev);
	qat_comp_dev_destroy(qat_pci_dev);
	qat_asym_dev_destroy(qat_pci_dev);
	return qat_pci_device_release(pci_dev);
}
358
/*
 * PCI probe callback: allocate the QAT device, reset its ring pairs and
 * create the sym/comp/asym service PMDs on top of it.
 * A service-creation failure is only a warning; probe succeeds as long
 * as the device itself comes up (if no service PMD could be created the
 * device is torn down again, but 0 is still returned).
 * Returns 0 on success, -ENODEV/-ENOTSUP on device-level failure.
 */
static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	int sym_ret = 0, asym_ret = 0, comp_ret = 0;
	int num_pmds_created = 0;
	struct qat_pci_device *qat_pci_dev;
	struct qat_dev_hw_spec_funcs *ops_hw;
	/* Per-service enqueue thresholds, filled from devargs during
	 * allocation and consumed by the service create functions.
	 */
	struct qat_dev_cmd_param qat_dev_cmd_param[] = {
			{ SYM_ENQ_THRESHOLD_NAME, 0 },
			{ ASYM_ENQ_THRESHOLD_NAME, 0 },
			{ COMP_ENQ_THRESHOLD_NAME, 0 },
			{ NULL, 0 },
	};

	QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
			pci_dev->addr.bus,
			pci_dev->addr.devid,
			pci_dev->addr.function);

	qat_pci_dev = qat_pci_device_allocate(pci_dev, qat_dev_cmd_param);
	if (qat_pci_dev == NULL)
		return -ENODEV;

	ops_hw = qat_dev_hw_spec[qat_pci_dev->qat_dev_gen];
	if (ops_hw->qat_dev_reset_ring_pairs == NULL) {
		/* Release the just-allocated device before bailing out;
		 * returning directly here would leak its memzone and
		 * leave the qat_pci_devs[] slot occupied.
		 */
		qat_pci_dev_destroy(qat_pci_dev, pci_dev);
		return -ENOTSUP;
	}
	if (ops_hw->qat_dev_reset_ring_pairs(qat_pci_dev)) {
		QAT_LOG(ERR,
			"Cannot reset ring pairs, does pf driver supports pf2vf comms?"
			);
		/* Same leak concern as above: undo the allocation */
		qat_pci_dev_destroy(qat_pci_dev, pci_dev);
		return -ENODEV;
	}

	sym_ret = qat_sym_dev_create(qat_pci_dev, qat_dev_cmd_param);
	if (sym_ret == 0)
		num_pmds_created++;
	else
		QAT_LOG(WARNING,
				"Failed to create QAT SYM PMD on device %s",
				qat_pci_dev->name);

	comp_ret = qat_comp_dev_create(qat_pci_dev, qat_dev_cmd_param);
	if (comp_ret == 0)
		num_pmds_created++;
	else
		QAT_LOG(WARNING,
				"Failed to create QAT COMP PMD on device %s",
				qat_pci_dev->name);

	asym_ret = qat_asym_dev_create(qat_pci_dev, qat_dev_cmd_param);
	if (asym_ret == 0)
		num_pmds_created++;
	else
		QAT_LOG(WARNING,
				"Failed to create QAT ASYM PMD on device %s",
				qat_pci_dev->name);

	/* No service PMD at all: device is useless, tear it down again */
	if (num_pmds_created == 0)
		qat_pci_dev_destroy(qat_pci_dev, pci_dev);

	return 0;
}
423
424 static int
qat_pci_remove(struct rte_pci_device * pci_dev)425 qat_pci_remove(struct rte_pci_device *pci_dev)
426 {
427 struct qat_pci_device *qat_pci_dev;
428
429 if (pci_dev == NULL)
430 return -EINVAL;
431
432 qat_pci_dev = qat_get_qat_dev_from_pci_dev(pci_dev);
433 if (qat_pci_dev == NULL)
434 return 0;
435
436 return qat_pci_dev_destroy(qat_pci_dev, pci_dev);
437 }
438
/* PCI driver definition hooked up below via RTE_PMD_REGISTER_PCI */
static struct rte_pci_driver rte_qat_pmd = {
		.id_table = pci_id_qat_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = qat_pci_probe,
		.remove = qat_pci_remove
};
445
/*
 * Weak no-op implementations of the per-service create/destroy hooks.
 * They let this common module link even when a service PMD (sym/asym
 * crypto or compression) is not built; when a service PMD is compiled
 * in, its strong definition overrides the stub at link time.
 * All stubs report success without doing anything.
 */
__rte_weak int
qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused,
		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
{
	return 0;
}

__rte_weak int
qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused,
		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
{
	return 0;
}

__rte_weak int
qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
{
	return 0;
}

__rte_weak int
qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
{
	return 0;
}

__rte_weak int
qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused,
		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
{
	return 0;
}

__rte_weak int
qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
{
	return 0;
}
484
/* Register the driver, its PCI id table and kernel-module deps with EAL */
RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd);
RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map);
RTE_PMD_REGISTER_KMOD_DEP(QAT_PCI_NAME, "* igb_uio | uio_pci_generic | vfio-pci");
488