/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 - 2022 Intel Corporation
 */

#include <stdarg.h>

#include <cryptodev_pmd.h>

#include "qat_device.h"
#include "qat_logs.h"

#include "qat_asym.h"
#include "icp_qat_fw_pke.h"
#include "icp_qat_fw.h"
#include "qat_pke.h"
#include "qat_ec.h"

uint8_t qat_asym_driver_id;

struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];

/* An rte_driver is needed in the registration of both the device and the
 * driver with cryptodev.
 * The actual qat pci's rte_driver can't be used as its name represents
 * the whole pci device with all services. Think of this as a holder for a name
 * for the crypto part of the pci device.
 */
static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
static const struct rte_driver cryptodev_qat_asym_driver = {
	.name = qat_asym_drv_name,
	.alias = qat_asym_drv_name
};
/*
 * Macros with the _F suffix implicitly use the following predefined
 * identifiers, which must be in scope at the call site:
 * - cookie->input_buffer
 * - qat_alg_bytesize
 */
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
#define HEXDUMP(name, where, size) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
			where, size)
#define HEXDUMP_OFF(name, where, size, idx) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
			&where[idx * size], size)

#define HEXDUMP_OFF_F(name, idx) QAT_DP_HEXDUMP_LOG(DEBUG, name, \
			&cookie->input_buffer[idx * qat_alg_bytesize], \
			qat_alg_bytesize)
#else
#define HEXDUMP(name, where, size)
#define HEXDUMP_OFF(name, where, size, idx)
#define HEXDUMP_OFF_F(name, idx)
#endif
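
/*
 * Illustrative use of the _F variant (debug builds only):
 *
 *	HEXDUMP_OFF_F("ModExp exponent", 1);
 *
 * expands to
 *
 *	QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent",
 *		&cookie->input_buffer[1 * qat_alg_bytesize],
 *		qat_alg_bytesize);
 *
 * so both `cookie' and `qat_alg_bytesize' must be in scope at the
 * call site, as noted above.
 */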

#define CHECK_IF_NOT_EMPTY(param, name, pname, status) \
	do { \
		if (param.length == 0) {	\
			QAT_LOG(ERR,	\
				"Invalid " name	\
				" input parameter, zero length " pname	\
			);	\
			status = -EINVAL;	\
		} else if (check_zero(param)) { \
			QAT_LOG(ERR,	\
				"Invalid " name " input parameter, empty " \
				pname ", length = %d", \
				(int)param.length \
			);	\
			status = -EINVAL;	\
		} \
	} while (0)

#define SET_PKE_LN(where, what, how, idx) \
	rte_memcpy(where[idx] + how - \
		what.length, \
		what.data, \
		what.length)

#define SET_PKE_LN_9A(where, what, how, idx) \
	rte_memcpy(&where[idx * RTE_ALIGN_CEIL(how, 8)] + \
		RTE_ALIGN_CEIL(how, 8) - \
		what.length, \
		what.data, \
		what.length)

#define SET_PKE_LN_EC(where, what, how, idx) \
	rte_memcpy(where[idx] + \
		RTE_ALIGN_CEIL(how, 8) - \
		how, \
		what.data, \
		how)

#define SET_PKE_LN_9A_F(what, idx) \
	rte_memcpy(&cookie->input_buffer[idx * qat_alg_bytesize] + \
		qat_alg_bytesize - what.length, \
		what.data, what.length)

#define SET_PKE_LN_EC_F(what, how, idx) \
	rte_memcpy(&cookie->input_buffer[idx * \
		RTE_ALIGN_CEIL(how, 8)] + \
		RTE_ALIGN_CEIL(how, 8) - how, \
		what.data, how)
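
/*
 * Worked example: with a 24-byte big-endian operand `mod' and a 64-byte
 * line size, SET_PKE_LN(cookie->input_array, mod, 64, 2) expands to
 *
 *	rte_memcpy(cookie->input_array[2] + 64 - 24, mod.data, 24);
 *
 * i.e. the operand is right-aligned within the PKE line and the leading
 * bytes are left as the zero padding expected for shorter big-endian
 * integers (the lines are zeroed again in cleanup() after each op).
 */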

static void
request_init(struct icp_qat_fw_pke_request *qat_req)
{
	memset(qat_req, 0, sizeof(*qat_req));
	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	qat_req->pke_hdr.hdr_flags =
			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
}

static void
cleanup_arrays(struct qat_asym_op_cookie *cookie,
		int in_count, int out_count, int alg_size)
{
	int i;

	for (i = 0; i < in_count; i++)
		memset(cookie->input_array[i], 0x0, alg_size);
	for (i = 0; i < out_count; i++)
		memset(cookie->output_array[i], 0x0, alg_size);
}

static void
cleanup_crt(struct qat_asym_op_cookie *cookie,
		int alg_size)
{
	int i;

	memset(cookie->input_array[0], 0x0, alg_size);
	for (i = 1; i < QAT_ASYM_RSA_QT_NUM_IN_PARAMS; i++)
		memset(cookie->input_array[i], 0x0, alg_size / 2);
	for (i = 0; i < QAT_ASYM_RSA_NUM_OUT_PARAMS; i++)
		memset(cookie->output_array[i], 0x0, alg_size);
}

static void
cleanup(struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform, int alg_size)
{
	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX)
		cleanup_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
				QAT_ASYM_MODEXP_NUM_OUT_PARAMS, alg_size);
	else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV)
		cleanup_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
				QAT_ASYM_MODINV_NUM_OUT_PARAMS, alg_size);
	else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
		if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT)
			cleanup_crt(cookie, alg_size);
		else {
			cleanup_arrays(cookie, QAT_ASYM_RSA_NUM_IN_PARAMS,
					QAT_ASYM_RSA_NUM_OUT_PARAMS, alg_size);
		}
	}
}

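/*
 * Return 1 when the big-endian parameter consists entirely of zero
 * bytes ("empty"), 0 otherwise. For buffers of 8 bytes or more the
 * trailing quadword is tested first as a cheap early exit before the
 * remaining bytes are scanned.
 */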
static int
check_zero(rte_crypto_param n)
{
	int i, len = n.length;

	if (len < 8) {
		for (i = len - 1; i >= 0; i--) {
			if (n.data[i] != 0x0)
				return 0;
		}
	} else if (len == 8 && *(uint64_t *)&n.data[len - 8] == 0) {
		return 1;
	} else if (*(uint64_t *)&n.data[len - 8] == 0) {
		for (i = len - 9; i >= 0; i--) {
			if (n.data[i] != 0x0)
				return 0;
		}
	} else
		return 0;

	return 1;
}

static struct qat_asym_function
get_asym_function(struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;

	switch (xform->xform_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		qat_function = get_modexp_function(xform);
		break;
	case RTE_CRYPTO_ASYM_XFORM_MODINV:
		qat_function = get_modinv_function(xform);
		break;
	default:
		qat_function.func_id = 0;
		break;
	}

	return qat_function;
}

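/*
 * Modular exponentiation: base, exponent and modulus are right-aligned
 * into input lines 0, 1 and 2, and the firmware functionality id is
 * picked from the largest of the three operand sizes.
 */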
static int
modexp_set_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, func_id, in_bytesize;
	int status = 0;

	CHECK_IF_NOT_EMPTY(xform->modex.modulus, "mod exp",
			"modulus", status);
	CHECK_IF_NOT_EMPTY(xform->modex.exponent, "mod exp",
			"exponent", status);
	if (status)
		return status;

	if (asym_op->modex.base.length > xform->modex.exponent.length &&
			asym_op->modex.base.length > xform->modex.modulus.length) {
		in_bytesize = asym_op->modex.base.length;
	} else if (xform->modex.exponent.length > xform->modex.modulus.length)
		in_bytesize = xform->modex.exponent.length;
	else
		in_bytesize = xform->modex.modulus.length;

	qat_function = get_modexp_function2(in_bytesize);
	func_id = qat_function.func_id;
	if (qat_function.func_id == 0) {
		QAT_LOG(ERR, "Cannot obtain functionality id");
		return -EINVAL;
	}
	alg_bytesize = qat_function.bytesize;

	SET_PKE_LN(cookie->input_array, asym_op->modex.base,
			alg_bytesize, 0);
	SET_PKE_LN(cookie->input_array, xform->modex.exponent,
			alg_bytesize, 1);
	SET_PKE_LN(cookie->input_array, xform->modex.modulus,
			alg_bytesize, 2);

	cookie->alg_bytesize = alg_bytesize;
	qat_req->pke_hdr.cd_pars.func_id = func_id;
	qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
	qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;

	HEXDUMP("ModExp base", cookie->input_array[0], alg_bytesize);
	HEXDUMP("ModExp exponent", cookie->input_array[1], alg_bytesize);
	HEXDUMP("ModExp modulus", cookie->input_array[2], alg_bytesize);

	return status;
}

static uint8_t
modexp_collect(struct rte_crypto_asym_op *asym_op,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	rte_crypto_param n = xform->modex.modulus;
	uint32_t alg_bytesize = cookie->alg_bytesize;
	uint8_t *modexp_result = asym_op->modex.result.data;

	rte_memcpy(modexp_result,
		cookie->output_array[0] + alg_bytesize
		- n.length, n.length);
	HEXDUMP("ModExp result", cookie->output_array[0],
			alg_bytesize);
	return RTE_CRYPTO_OP_STATUS_SUCCESS;
}

static int
modinv_set_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, func_id;
	int status = 0;

	CHECK_IF_NOT_EMPTY(xform->modinv.modulus, "mod inv",
			"modulus", status);
	if (status)
		return status;

	qat_function = get_asym_function(xform);
	func_id = qat_function.func_id;
	if (func_id == 0) {
		QAT_LOG(ERR, "Cannot obtain functionality id");
		return -EINVAL;
	}
	alg_bytesize = qat_function.bytesize;

	SET_PKE_LN(cookie->input_array, asym_op->modinv.base,
			alg_bytesize, 0);
	SET_PKE_LN(cookie->input_array, xform->modinv.modulus,
			alg_bytesize, 1);

	cookie->alg_bytesize = alg_bytesize;
	qat_req->pke_hdr.cd_pars.func_id = func_id;
	qat_req->input_param_count =
			QAT_ASYM_MODINV_NUM_IN_PARAMS;
	qat_req->output_param_count =
			QAT_ASYM_MODINV_NUM_OUT_PARAMS;

	HEXDUMP("ModInv base", cookie->input_array[0], alg_bytesize);
	HEXDUMP("ModInv modulus", cookie->input_array[1], alg_bytesize);

	return 0;
}

static uint8_t
modinv_collect(struct rte_crypto_asym_op *asym_op,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	rte_crypto_param n = xform->modinv.modulus;
	uint8_t *modinv_result = asym_op->modinv.result.data;
	uint32_t alg_bytesize = cookie->alg_bytesize;

	rte_memcpy(modinv_result + (asym_op->modinv.result.length
		- n.length),
		cookie->output_array[0] + alg_bytesize
		- n.length, n.length);
	HEXDUMP("ModInv result", cookie->output_array[0],
			alg_bytesize);
	return RTE_CRYPTO_OP_STATUS_SUCCESS;
}

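/*
 * Public-key RSA path (encrypt/verify): input line 0 carries the
 * message or signature, line 1 the public exponent e, line 2 the
 * modulus n. Only RTE_CRYPTO_RSA_PADDING_NONE is accepted; any other
 * padding type is rejected with -EINVAL.
 */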
static int
rsa_set_pub_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, func_id;
	int status = 0;

	qat_function = get_rsa_enc_function(xform);
	func_id = qat_function.func_id;
	if (func_id == 0) {
		QAT_LOG(ERR, "Cannot obtain functionality id");
		return -EINVAL;
	}
	alg_bytesize = qat_function.bytesize;

	if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
		switch (asym_op->rsa.pad) {
		case RTE_CRYPTO_RSA_PADDING_NONE:
			SET_PKE_LN(cookie->input_array, asym_op->rsa.message,
					alg_bytesize, 0);
			break;
		default:
			QAT_LOG(ERR,
				"Invalid RSA padding (Encryption)");
			return -EINVAL;
		}
		HEXDUMP("RSA Message", cookie->input_array[0], alg_bytesize);
	} else {
		switch (asym_op->rsa.pad) {
		case RTE_CRYPTO_RSA_PADDING_NONE:
			SET_PKE_LN(cookie->input_array, asym_op->rsa.sign,
					alg_bytesize, 0);
			break;
		default:
			QAT_LOG(ERR,
				"Invalid RSA padding (Verify)");
			return -EINVAL;
		}
		HEXDUMP("RSA Signature", cookie->input_array[0],
				alg_bytesize);
	}

	SET_PKE_LN(cookie->input_array, xform->rsa.e,
			alg_bytesize, 1);
	SET_PKE_LN(cookie->input_array, xform->rsa.n,
			alg_bytesize, 2);

	cookie->alg_bytesize = alg_bytesize;
	qat_req->pke_hdr.cd_pars.func_id = func_id;

	HEXDUMP("RSA Public Key", cookie->input_array[1], alg_bytesize);
	HEXDUMP("RSA Modulus", cookie->input_array[2], alg_bytesize);

	return status;
}

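/*
 * Private-key RSA path (decrypt/sign). For RTE_RSA_KEY_TYPE_QT the five
 * CRT parameters p, q, dP, dQ and qInv occupy input lines 1-5 at half
 * the modulus width, hence (alg_bytesize >> 1); for RTE_RSA_KEY_TYPE_EXP
 * the private exponent d and modulus n occupy lines 1-2 at full width.
 * Line 0 always holds the ciphertext (decrypt) or message (sign).
 */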
static int
rsa_set_priv_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, func_id;
	int status = 0;

	if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT) {
		qat_function = get_rsa_crt_function(xform);
		func_id = qat_function.func_id;
		if (func_id == 0) {
			QAT_LOG(ERR, "Cannot obtain functionality id");
			return -EINVAL;
		}
		alg_bytesize = qat_function.bytesize;
		qat_req->input_param_count =
				QAT_ASYM_RSA_QT_NUM_IN_PARAMS;

		SET_PKE_LN(cookie->input_array, xform->rsa.qt.p,
				(alg_bytesize >> 1), 1);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.q,
				(alg_bytesize >> 1), 2);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.dP,
				(alg_bytesize >> 1), 3);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.dQ,
				(alg_bytesize >> 1), 4);
		SET_PKE_LN(cookie->input_array, xform->rsa.qt.qInv,
				(alg_bytesize >> 1), 5);

		HEXDUMP("RSA p", cookie->input_array[1],
				alg_bytesize);
		HEXDUMP("RSA q", cookie->input_array[2],
				alg_bytesize);
		HEXDUMP("RSA dP", cookie->input_array[3],
				alg_bytesize);
		HEXDUMP("RSA dQ", cookie->input_array[4],
				alg_bytesize);
		HEXDUMP("RSA qInv", cookie->input_array[5],
				alg_bytesize);
	} else if (xform->rsa.key_type ==
			RTE_RSA_KEY_TYPE_EXP) {
		qat_function = get_rsa_dec_function(xform);
		func_id = qat_function.func_id;
		if (func_id == 0) {
			QAT_LOG(ERR, "Cannot obtain functionality id");
			return -EINVAL;
		}
		alg_bytesize = qat_function.bytesize;

		SET_PKE_LN(cookie->input_array, xform->rsa.d,
				alg_bytesize, 1);
		SET_PKE_LN(cookie->input_array, xform->rsa.n,
				alg_bytesize, 2);

		HEXDUMP("RSA d", cookie->input_array[1],
				alg_bytesize);
		HEXDUMP("RSA n", cookie->input_array[2],
				alg_bytesize);
	} else {
		QAT_LOG(ERR, "Invalid RSA key type");
		return -EINVAL;
	}

454
455 if (asym_op->rsa.op_type ==
456 RTE_CRYPTO_ASYM_OP_DECRYPT) {
457 switch (asym_op->rsa.pad) {
458 case RTE_CRYPTO_RSA_PADDING_NONE:
459 SET_PKE_LN(cookie->input_array, asym_op->rsa.cipher,
460 alg_bytesize, 0);
461 HEXDUMP("RSA ciphertext", cookie->input_array[0],
462 alg_bytesize);
463 break;
464 default:
465 QAT_LOG(ERR,
466 "Invalid padding of RSA (Decrypt)");
467 return -(EINVAL);
468 }
469
470 } else if (asym_op->rsa.op_type ==
471 RTE_CRYPTO_ASYM_OP_SIGN) {
472 switch (asym_op->rsa.pad) {
473 case RTE_CRYPTO_RSA_PADDING_NONE:
474 SET_PKE_LN(cookie->input_array, asym_op->rsa.message,
475 alg_bytesize, 0);
476 HEXDUMP("RSA text to be signed", cookie->input_array[0],
477 alg_bytesize);
478 break;
479 default:
480 QAT_LOG(ERR,
481 "Invalid padding of RSA (Signature)");
482 return -(EINVAL);
483 }
484 }
485
486 cookie->alg_bytesize = alg_bytesize;
487 qat_req->pke_hdr.cd_pars.func_id = func_id;
488 return status;
489 }

static int
rsa_set_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	qat_req->input_param_count =
			QAT_ASYM_RSA_NUM_IN_PARAMS;
	qat_req->output_param_count =
			QAT_ASYM_RSA_NUM_OUT_PARAMS;

	if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
			asym_op->rsa.op_type ==
			RTE_CRYPTO_ASYM_OP_VERIFY) {
		return rsa_set_pub_input(asym_op, qat_req, cookie, xform);
	} else {
		return rsa_set_priv_input(asym_op, qat_req, cookie, xform);
	}
}

static uint8_t
rsa_collect(struct rte_crypto_asym_op *asym_op,
		struct qat_asym_op_cookie *cookie)
{
	uint32_t alg_bytesize = cookie->alg_bytesize;

	if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
		asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {

		if (asym_op->rsa.op_type ==
				RTE_CRYPTO_ASYM_OP_ENCRYPT) {
			uint8_t *rsa_result = asym_op->rsa.cipher.data;

			rte_memcpy(rsa_result,
					cookie->output_array[0],
					alg_bytesize);
			HEXDUMP("RSA Encrypted data", cookie->output_array[0],
					alg_bytesize);
		} else {
			uint8_t *rsa_result = asym_op->rsa.cipher.data;

			switch (asym_op->rsa.pad) {
			case RTE_CRYPTO_RSA_PADDING_NONE:
				rte_memcpy(rsa_result,
						cookie->output_array[0],
						alg_bytesize);
				HEXDUMP("RSA signature",
						cookie->output_array[0],
						alg_bytesize);
				break;
			default:
				QAT_LOG(ERR, "Padding not supported");
				return RTE_CRYPTO_OP_STATUS_ERROR;
			}
		}
	} else {
		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
			uint8_t *rsa_result = asym_op->rsa.message.data;

			switch (asym_op->rsa.pad) {
			case RTE_CRYPTO_RSA_PADDING_NONE:
				rte_memcpy(rsa_result,
						cookie->output_array[0],
						alg_bytesize);
				HEXDUMP("RSA Decrypted Message",
						cookie->output_array[0],
						alg_bytesize);
				break;
			default:
				QAT_LOG(ERR, "Padding not supported");
				return RTE_CRYPTO_OP_STATUS_ERROR;
			}
		} else {
			uint8_t *rsa_result = asym_op->rsa.sign.data;

			rte_memcpy(rsa_result,
					cookie->output_array[0],
					alg_bytesize);
			HEXDUMP("RSA Signature", cookie->output_array[0],
					alg_bytesize);
		}
	}
	return RTE_CRYPTO_OP_STATUS_SUCCESS;
}

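/*
 * ECDSA requests pack the per-op values (private key, k and message for
 * sign; r, s, message and public key for verify) together with the
 * curve constants a, b, p, n and the generator (x, y) into the flat
 * cookie->input_buffer; each line is padded to an 8-byte multiple,
 * which is why the _9A/_EC macro variants align on
 * RTE_ALIGN_CEIL(alg_bytesize, 8).
 */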
static int
ecdsa_set_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, qat_alg_bytesize, func_id;
	int curve_id;

	curve_id = pick_curve(xform);
	if (curve_id < 0) {
		QAT_LOG(ERR, "Incorrect elliptic curve");
		return -EINVAL;
	}

	switch (asym_op->ecdsa.op_type) {
	case RTE_CRYPTO_ASYM_OP_SIGN:
		qat_function = get_ecdsa_function(xform);
		func_id = qat_function.func_id;
		if (func_id == 0) {
			QAT_LOG(ERR, "Cannot obtain functionality id");
			return -EINVAL;
		}
		alg_bytesize = qat_function.bytesize;
		qat_alg_bytesize = RTE_ALIGN_CEIL(alg_bytesize, 8);

		SET_PKE_LN_9A_F(asym_op->ecdsa.pkey, 0);
		SET_PKE_LN_9A_F(asym_op->ecdsa.message, 1);
		SET_PKE_LN_9A_F(asym_op->ecdsa.k, 2);
		SET_PKE_LN_EC_F(curve[curve_id].b, alg_bytesize, 3);
		SET_PKE_LN_EC_F(curve[curve_id].a, alg_bytesize, 4);
		SET_PKE_LN_EC_F(curve[curve_id].p, alg_bytesize, 5);
		SET_PKE_LN_EC_F(curve[curve_id].n, alg_bytesize, 6);
		SET_PKE_LN_EC_F(curve[curve_id].y, alg_bytesize, 7);
		SET_PKE_LN_EC_F(curve[curve_id].x, alg_bytesize, 8);

		cookie->alg_bytesize = alg_bytesize;
		qat_req->pke_hdr.cd_pars.func_id = func_id;
		qat_req->input_param_count =
				QAT_ASYM_ECDSA_RS_SIGN_IN_PARAMS;
		qat_req->output_param_count =
				QAT_ASYM_ECDSA_RS_SIGN_OUT_PARAMS;

		HEXDUMP_OFF_F("ECDSA d", 0);
		HEXDUMP_OFF_F("ECDSA e", 1);
		HEXDUMP_OFF_F("ECDSA k", 2);
		HEXDUMP_OFF_F("ECDSA b", 3);
		HEXDUMP_OFF_F("ECDSA a", 4);
		HEXDUMP_OFF_F("ECDSA n", 5);
		HEXDUMP_OFF_F("ECDSA y", 6);
		HEXDUMP_OFF_F("ECDSA x", 7);
		break;
	case RTE_CRYPTO_ASYM_OP_VERIFY:
		qat_function = get_ecdsa_verify_function(xform);
		func_id = qat_function.func_id;
		if (func_id == 0) {
			QAT_LOG(ERR, "Cannot obtain functionality id");
			return -EINVAL;
		}
		alg_bytesize = qat_function.bytesize;
		qat_alg_bytesize = RTE_ALIGN_CEIL(alg_bytesize, 8);

		SET_PKE_LN_9A_F(asym_op->ecdsa.message, 10);
		SET_PKE_LN_9A_F(asym_op->ecdsa.s, 9);
		SET_PKE_LN_9A_F(asym_op->ecdsa.r, 8);
		SET_PKE_LN_EC_F(curve[curve_id].n, alg_bytesize, 7);
		SET_PKE_LN_EC_F(curve[curve_id].x, alg_bytesize, 6);
		SET_PKE_LN_EC_F(curve[curve_id].y, alg_bytesize, 5);
		SET_PKE_LN_9A_F(asym_op->ecdsa.q.x, 4);
		SET_PKE_LN_9A_F(asym_op->ecdsa.q.y, 3);
		SET_PKE_LN_EC_F(curve[curve_id].a, alg_bytesize, 2);
		SET_PKE_LN_EC_F(curve[curve_id].b, alg_bytesize, 1);
		SET_PKE_LN_EC_F(curve[curve_id].p, alg_bytesize, 0);

		cookie->alg_bytesize = alg_bytesize;
		qat_req->pke_hdr.cd_pars.func_id = func_id;
		qat_req->input_param_count =
				QAT_ASYM_ECDSA_RS_VERIFY_IN_PARAMS;
		qat_req->output_param_count =
				QAT_ASYM_ECDSA_RS_VERIFY_OUT_PARAMS;

		HEXDUMP_OFF_F("p", 0);
		HEXDUMP_OFF_F("b", 1);
		HEXDUMP_OFF_F("a", 2);
		HEXDUMP_OFF_F("y", 3);
		HEXDUMP_OFF_F("x", 4);
		HEXDUMP_OFF_F("yG", 5);
		HEXDUMP_OFF_F("xG", 6);
		HEXDUMP_OFF_F("n", 7);
		HEXDUMP_OFF_F("r", 8);
		HEXDUMP_OFF_F("s", 9);
		HEXDUMP_OFF_F("e", 10);
		break;
	default:
		return -1;
	}

	return 0;
}

static uint8_t
ecdsa_collect(struct rte_crypto_asym_op *asym_op,
		struct qat_asym_op_cookie *cookie)
{
	uint32_t alg_bytesize = cookie->alg_bytesize;
	uint32_t qat_alg_bytesize = RTE_ALIGN_CEIL(cookie->alg_bytesize, 8);
	uint32_t ltrim = qat_alg_bytesize - alg_bytesize;

	if (asym_op->ecdsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
		uint8_t *r = asym_op->ecdsa.r.data;
		uint8_t *s = asym_op->ecdsa.s.data;

		asym_op->ecdsa.r.length = alg_bytesize;
		asym_op->ecdsa.s.length = alg_bytesize;
		rte_memcpy(r, &cookie->output_array[0][ltrim], alg_bytesize);
		rte_memcpy(s, &cookie->output_array[1][ltrim], alg_bytesize);

		HEXDUMP("R", cookie->output_array[0],
			alg_bytesize);
		HEXDUMP("S", cookie->output_array[1],
			alg_bytesize);
	}
	return RTE_CRYPTO_OP_STATUS_SUCCESS;
}

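/*
 * EC point multiplication: line 0 is the scalar k, lines 1-2 the input
 * point (x, y), and lines 3-6 the constants a, b, p and the cofactor h
 * from the curve table entry selected by pick_curve().
 */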
static int
ecpm_set_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct qat_asym_function qat_function;
	uint32_t alg_bytesize, __rte_unused qat_alg_bytesize, func_id;
	int curve_id;

	curve_id = pick_curve(xform);
	if (curve_id < 0) {
		QAT_LOG(ERR, "Incorrect elliptic curve");
		return -EINVAL;
	}

	qat_function = get_ecpm_function(xform);
	func_id = qat_function.func_id;
	if (func_id == 0) {
		QAT_LOG(ERR, "Cannot obtain functionality id");
		return -EINVAL;
	}
	alg_bytesize = qat_function.bytesize;
	qat_alg_bytesize = RTE_ALIGN_CEIL(alg_bytesize, 8);

	SET_PKE_LN_EC(cookie->input_array, asym_op->ecpm.scalar,
			asym_op->ecpm.scalar.length, 0);
	SET_PKE_LN_EC(cookie->input_array, asym_op->ecpm.p.x,
			asym_op->ecpm.p.x.length, 1);
	SET_PKE_LN_EC(cookie->input_array, asym_op->ecpm.p.y,
			asym_op->ecpm.p.y.length, 2);
	SET_PKE_LN_EC(cookie->input_array, curve[curve_id].a,
			alg_bytesize, 3);
	SET_PKE_LN_EC(cookie->input_array, curve[curve_id].b,
			alg_bytesize, 4);
	SET_PKE_LN_EC(cookie->input_array, curve[curve_id].p,
			alg_bytesize, 5);
	SET_PKE_LN_EC(cookie->input_array, curve[curve_id].h,
			alg_bytesize, 6);

	cookie->alg_bytesize = alg_bytesize;
	qat_req->pke_hdr.cd_pars.func_id = func_id;
	qat_req->input_param_count =
			QAT_ASYM_ECPM_IN_PARAMS;
	qat_req->output_param_count =
			QAT_ASYM_ECPM_OUT_PARAMS;

	HEXDUMP("k", cookie->input_array[0], qat_alg_bytesize);
	HEXDUMP("xG", cookie->input_array[1], qat_alg_bytesize);
	HEXDUMP("yG", cookie->input_array[2], qat_alg_bytesize);
	HEXDUMP("a", cookie->input_array[3], qat_alg_bytesize);
	HEXDUMP("b", cookie->input_array[4], qat_alg_bytesize);
	HEXDUMP("q", cookie->input_array[5], qat_alg_bytesize);
	HEXDUMP("h", cookie->input_array[6], qat_alg_bytesize);

	return 0;
}

static uint8_t
ecpm_collect(struct rte_crypto_asym_op *asym_op,
		struct qat_asym_op_cookie *cookie)
{
	uint8_t *x = asym_op->ecpm.r.x.data;
	uint8_t *y = asym_op->ecpm.r.y.data;
	uint32_t alg_bytesize = cookie->alg_bytesize;
	uint32_t qat_alg_bytesize = RTE_ALIGN_CEIL(cookie->alg_bytesize, 8);
	uint32_t ltrim = qat_alg_bytesize - alg_bytesize;

	asym_op->ecpm.r.x.length = alg_bytesize;
	asym_op->ecpm.r.y.length = alg_bytesize;
	rte_memcpy(x, &cookie->output_array[0][ltrim], alg_bytesize);
	rte_memcpy(y, &cookie->output_array[1][ltrim], alg_bytesize);

	HEXDUMP("rX", cookie->output_array[0],
		alg_bytesize);
	HEXDUMP("rY", cookie->output_array[1],
		alg_bytesize);
	return RTE_CRYPTO_OP_STATUS_SUCCESS;
}

static int
asym_set_input(struct rte_crypto_asym_op *asym_op,
		struct icp_qat_fw_pke_request *qat_req,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	switch (xform->xform_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		return modexp_set_input(asym_op, qat_req,
				cookie, xform);
	case RTE_CRYPTO_ASYM_XFORM_MODINV:
		return modinv_set_input(asym_op, qat_req,
				cookie, xform);
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		return rsa_set_input(asym_op, qat_req,
				cookie, xform);
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		return ecdsa_set_input(asym_op, qat_req,
				cookie, xform);
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		return ecpm_set_input(asym_op, qat_req,
				cookie, xform);
	default:
		QAT_LOG(ERR, "Invalid/unsupported asymmetric crypto xform");
		return -EINVAL;
	}
}

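/*
 * Datapath request builder, invoked per op from qat_enqueue_op_burst().
 * It initialises the PKE header, packs the inputs via asym_set_input()
 * and points the firmware descriptor at the IOVA-addressable parameter
 * tables precomputed in the op cookie. On failure the request is
 * downgraded to a NULL request (and the error latched in the cookie),
 * so the descriptor still completes and is reported on dequeue.
 */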
static int
qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
		__rte_unused uint64_t *opaque,
		__rte_unused enum qat_device_gen qat_dev_gen)
{
	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
	struct rte_crypto_asym_op *asym_op = op->asym;
	struct icp_qat_fw_pke_request *qat_req =
			(struct icp_qat_fw_pke_request *)out_msg;
	struct qat_asym_op_cookie *cookie =
			(struct qat_asym_op_cookie *)op_cookie;
	struct rte_crypto_asym_xform *xform;
	struct qat_asym_session *qat_session = (struct qat_asym_session *)
			op->asym->session->sess_private_data;
	int err = 0;

	if (unlikely(qat_session == NULL)) {
		QAT_DP_LOG(ERR, "Session was not created for this device");
		goto error;
	}

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	switch (op->sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		request_init(qat_req);
		xform = &qat_session->xform;
		break;
	case RTE_CRYPTO_OP_SESSIONLESS:
		request_init(qat_req);
		xform = op->asym->xform;
		break;
	default:
		QAT_DP_LOG(ERR, "Invalid session/xform settings");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		goto error;
	}
	err = asym_set_input(asym_op, qat_req, cookie,
			xform);
	if (err) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		goto error;
	}

	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
	qat_req->pke_mid.src_data_addr = cookie->input_addr;
	qat_req->pke_mid.dest_data_addr = cookie->output_addr;

	HEXDUMP("qat_req:", qat_req, sizeof(struct icp_qat_fw_pke_request));

	return 0;
error:
	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
	HEXDUMP("qat_req:", qat_req, sizeof(struct icp_qat_fw_pke_request));
	qat_req->output_param_count = 0;
	qat_req->input_param_count = 0;
	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
	cookie->error |= err;

	return 0;
}

static uint8_t
qat_asym_collect_response(struct rte_crypto_op *op,
		struct qat_asym_op_cookie *cookie,
		struct rte_crypto_asym_xform *xform)
{
	struct rte_crypto_asym_op *asym_op = op->asym;

	switch (xform->xform_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		return modexp_collect(asym_op, cookie, xform);
	case RTE_CRYPTO_ASYM_XFORM_MODINV:
		return modinv_collect(asym_op, cookie, xform);
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		return rsa_collect(asym_op, cookie);
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		return ecdsa_collect(asym_op, cookie);
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		return ecpm_collect(asym_op, cookie);
	default:
		QAT_LOG(ERR, "Unsupported xform type");
		return RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

static int
qat_asym_process_response(void **out_op, uint8_t *resp,
		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
{
	struct icp_qat_fw_pke_resp *resp_msg =
			(struct icp_qat_fw_pke_resp *)resp;
	struct rte_crypto_op *op = (struct rte_crypto_op *)(uintptr_t)
			(resp_msg->opaque);
	struct qat_asym_op_cookie *cookie = op_cookie;
	struct rte_crypto_asym_xform *xform;
	struct qat_asym_session *qat_session = (struct qat_asym_session *)
			op->asym->session->sess_private_data;

	if (cookie->error) {
		cookie->error = 0;
		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		QAT_DP_LOG(ERR, "Cookie status returned error");
	} else {
		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
			if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			QAT_DP_LOG(ERR, "Asymmetric response status"
					" returned error");
		}
		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
			if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			QAT_DP_LOG(ERR, "Asymmetric common status"
					" returned error");
		}
	}

	switch (op->sess_type) {
	case RTE_CRYPTO_OP_WITH_SESSION:
		xform = &qat_session->xform;
		break;
	case RTE_CRYPTO_OP_SESSIONLESS:
		xform = op->asym->xform;
		break;
	default:
		QAT_DP_LOG(ERR,
			"Invalid session/xform settings in response ring!");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
	}

	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
		op->status = qat_asym_collect_response(op,
				cookie, xform);
		cleanup(cookie, xform, cookie->alg_bytesize);
	}

	*out_op = op;
	HEXDUMP("resp_msg:", resp_msg, sizeof(struct icp_qat_fw_pke_resp));

	return 1;
}

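/*
 * The session_set_* helpers below deep-copy the transform parameters
 * into session-private memory, so the xform buffers supplied by the
 * application do not have to outlive this call.
 */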
static int
session_set_modexp(struct qat_asym_session *qat_session,
		struct rte_crypto_asym_xform *xform)
{
	uint8_t *modulus = xform->modex.modulus.data;
	uint8_t *exponent = xform->modex.exponent.data;

	qat_session->xform.modex.modulus.data =
		rte_malloc(NULL, xform->modex.modulus.length, 0);
	if (qat_session->xform.modex.modulus.data == NULL)
		return -ENOMEM;
	qat_session->xform.modex.modulus.length = xform->modex.modulus.length;
	qat_session->xform.modex.exponent.data = rte_malloc(NULL,
			xform->modex.exponent.length, 0);
	if (qat_session->xform.modex.exponent.data == NULL) {
		rte_free(qat_session->xform.modex.modulus.data);
		return -ENOMEM;
	}
	qat_session->xform.modex.exponent.length = xform->modex.exponent.length;

	rte_memcpy(qat_session->xform.modex.modulus.data, modulus,
			xform->modex.modulus.length);
	rte_memcpy(qat_session->xform.modex.exponent.data, exponent,
			xform->modex.exponent.length);

	return 0;
}

static int
session_set_modinv(struct qat_asym_session *qat_session,
		struct rte_crypto_asym_xform *xform)
{
	uint8_t *modulus = xform->modinv.modulus.data;

	qat_session->xform.modinv.modulus.data =
		rte_malloc(NULL, xform->modinv.modulus.length, 0);
	if (qat_session->xform.modinv.modulus.data == NULL)
		return -ENOMEM;
	qat_session->xform.modinv.modulus.length = xform->modinv.modulus.length;

	rte_memcpy(qat_session->xform.modinv.modulus.data, modulus,
			xform->modinv.modulus.length);

	return 0;
}

static int
session_set_rsa(struct qat_asym_session *qat_session,
		struct rte_crypto_asym_xform *xform)
{
	uint8_t *n = xform->rsa.n.data;
	uint8_t *e = xform->rsa.e.data;
	int ret = 0;

	qat_session->xform.rsa.key_type = xform->rsa.key_type;

	qat_session->xform.rsa.n.data =
		rte_malloc(NULL, xform->rsa.n.length, 0);
	if (qat_session->xform.rsa.n.data == NULL)
		return -ENOMEM;
	qat_session->xform.rsa.n.length =
		xform->rsa.n.length;

	qat_session->xform.rsa.e.data =
		rte_malloc(NULL, xform->rsa.e.length, 0);
	if (qat_session->xform.rsa.e.data == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	qat_session->xform.rsa.e.length =
		xform->rsa.e.length;

	if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_QT) {
		uint8_t *p = xform->rsa.qt.p.data;
		uint8_t *q = xform->rsa.qt.q.data;
		uint8_t *dP = xform->rsa.qt.dP.data;
		uint8_t *dQ = xform->rsa.qt.dQ.data;
		uint8_t *qInv = xform->rsa.qt.qInv.data;

		qat_session->xform.rsa.qt.p.data =
			rte_malloc(NULL, xform->rsa.qt.p.length, 0);
		if (qat_session->xform.rsa.qt.p.data == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		qat_session->xform.rsa.qt.p.length =
			xform->rsa.qt.p.length;

		qat_session->xform.rsa.qt.q.data =
			rte_malloc(NULL, xform->rsa.qt.q.length, 0);
		if (qat_session->xform.rsa.qt.q.data == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		qat_session->xform.rsa.qt.q.length =
			xform->rsa.qt.q.length;

		qat_session->xform.rsa.qt.dP.data =
			rte_malloc(NULL, xform->rsa.qt.dP.length, 0);
		if (qat_session->xform.rsa.qt.dP.data == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		qat_session->xform.rsa.qt.dP.length =
			xform->rsa.qt.dP.length;

		qat_session->xform.rsa.qt.dQ.data =
			rte_malloc(NULL, xform->rsa.qt.dQ.length, 0);
		if (qat_session->xform.rsa.qt.dQ.data == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		qat_session->xform.rsa.qt.dQ.length =
			xform->rsa.qt.dQ.length;

		qat_session->xform.rsa.qt.qInv.data =
			rte_malloc(NULL, xform->rsa.qt.qInv.length, 0);
		if (qat_session->xform.rsa.qt.qInv.data == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		qat_session->xform.rsa.qt.qInv.length =
			xform->rsa.qt.qInv.length;

		rte_memcpy(qat_session->xform.rsa.qt.p.data, p,
				xform->rsa.qt.p.length);
		rte_memcpy(qat_session->xform.rsa.qt.q.data, q,
				xform->rsa.qt.q.length);
		rte_memcpy(qat_session->xform.rsa.qt.dP.data, dP,
				xform->rsa.qt.dP.length);
		rte_memcpy(qat_session->xform.rsa.qt.dQ.data, dQ,
				xform->rsa.qt.dQ.length);
		rte_memcpy(qat_session->xform.rsa.qt.qInv.data, qInv,
				xform->rsa.qt.qInv.length);

	} else {
		uint8_t *d = xform->rsa.d.data;

		qat_session->xform.rsa.d.data =
			rte_malloc(NULL, xform->rsa.d.length, 0);
		if (qat_session->xform.rsa.d.data == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		qat_session->xform.rsa.d.length =
			xform->rsa.d.length;
		rte_memcpy(qat_session->xform.rsa.d.data, d,
				xform->rsa.d.length);
	}

	rte_memcpy(qat_session->xform.rsa.n.data, n,
			xform->rsa.n.length);
	rte_memcpy(qat_session->xform.rsa.e.data, e,
			xform->rsa.e.length);

	return 0;

err:
	rte_free(qat_session->xform.rsa.n.data);
	rte_free(qat_session->xform.rsa.e.data);
	rte_free(qat_session->xform.rsa.d.data);
	rte_free(qat_session->xform.rsa.qt.p.data);
	rte_free(qat_session->xform.rsa.qt.q.data);
	rte_free(qat_session->xform.rsa.qt.dP.data);
	rte_free(qat_session->xform.rsa.qt.dQ.data);
	rte_free(qat_session->xform.rsa.qt.qInv.data);
	return ret;
}

static void
session_set_ecdsa(struct qat_asym_session *qat_session,
		struct rte_crypto_asym_xform *xform)
{
	qat_session->xform.ec.curve_id = xform->ec.curve_id;
}

int
qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
		struct rte_crypto_asym_xform *xform,
		struct rte_cryptodev_asym_session *session)
{
	struct qat_asym_session *qat_session;
	int ret = 0;

	qat_session = (struct qat_asym_session *) session->sess_private_data;
	memset(qat_session, 0, sizeof(*qat_session));

	qat_session->xform.xform_type = xform->xform_type;
	switch (xform->xform_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		ret = session_set_modexp(qat_session, xform);
		break;
	case RTE_CRYPTO_ASYM_XFORM_MODINV:
		ret = session_set_modinv(qat_session, xform);
		break;
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		ret = session_set_rsa(qat_session, xform);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		session_set_ecdsa(qat_session, xform);
		break;
	default:
		ret = -ENOTSUP;
	}

	if (ret) {
		QAT_LOG(ERR, "Unsupported xform type");
		return ret;
	}

	return 0;
}

unsigned int
qat_asym_session_get_private_size(struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
}

static void
session_clear_modexp(struct rte_crypto_modex_xform *modex)
{
	memset(modex->modulus.data, 0, modex->modulus.length);
	rte_free(modex->modulus.data);
	memset(modex->exponent.data, 0, modex->exponent.length);
	rte_free(modex->exponent.data);
}

static void
session_clear_modinv(struct rte_crypto_modinv_xform *modinv)
{
	memset(modinv->modulus.data, 0, modinv->modulus.length);
	rte_free(modinv->modulus.data);
}

static void
session_clear_rsa(struct rte_crypto_rsa_xform *rsa)
{
	memset(rsa->n.data, 0, rsa->n.length);
	rte_free(rsa->n.data);
	memset(rsa->e.data, 0, rsa->e.length);
	rte_free(rsa->e.data);
	if (rsa->key_type == RTE_RSA_KEY_TYPE_EXP) {
		memset(rsa->d.data, 0, rsa->d.length);
		rte_free(rsa->d.data);
	} else {
		memset(rsa->qt.p.data, 0, rsa->qt.p.length);
		rte_free(rsa->qt.p.data);
		memset(rsa->qt.q.data, 0, rsa->qt.q.length);
		rte_free(rsa->qt.q.data);
		memset(rsa->qt.dP.data, 0, rsa->qt.dP.length);
		rte_free(rsa->qt.dP.data);
		memset(rsa->qt.dQ.data, 0, rsa->qt.dQ.length);
		rte_free(rsa->qt.dQ.data);
		memset(rsa->qt.qInv.data, 0, rsa->qt.qInv.length);
		rte_free(rsa->qt.qInv.data);
	}
}

static void
session_clear_xform(struct qat_asym_session *qat_session)
{
	switch (qat_session->xform.xform_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		session_clear_modexp(&qat_session->xform.modex);
		break;
	case RTE_CRYPTO_ASYM_XFORM_MODINV:
		session_clear_modinv(&qat_session->xform.modinv);
		break;
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		session_clear_rsa(&qat_session->xform.rsa);
		break;
	default:
		break;
	}
}

void
qat_asym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_asym_session *session)
{
	void *sess_priv = session->sess_private_data;
	struct qat_asym_session *qat_session =
		(struct qat_asym_session *)sess_priv;

	if (sess_priv) {
		session_clear_xform(qat_session);
		memset(qat_session, 0, qat_asym_session_get_private_size(dev));
	}
}

static uint16_t
qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
			nb_ops);
}

static uint16_t
qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
			nb_ops);
}

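/*
 * Precompute, once per cookie, the IOVA of the input/output pointer
 * tables and of each of the 8 parameter lines inside the cookie, so no
 * address translation is needed on the datapath.
 */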
void
qat_asym_init_op_cookie(void *op_cookie)
{
	int j;
	struct qat_asym_op_cookie *cookie = op_cookie;

	cookie->input_addr = rte_mempool_virt2iova(cookie) +
			offsetof(struct qat_asym_op_cookie,
					input_params_ptrs);

	cookie->output_addr = rte_mempool_virt2iova(cookie) +
			offsetof(struct qat_asym_op_cookie,
					output_params_ptrs);

	for (j = 0; j < 8; j++) {
		cookie->input_params_ptrs[j] =
				rte_mempool_virt2iova(cookie) +
				offsetof(struct qat_asym_op_cookie,
						input_array[j]);
		cookie->output_params_ptrs[j] =
				rte_mempool_virt2iova(cookie) +
				offsetof(struct qat_asym_op_cookie,
						output_array[j]);
	}
}

int
qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
		struct qat_dev_cmd_param *qat_dev_cmd_param)
{
	struct qat_cryptodev_private *internals;
	struct rte_cryptodev *cryptodev;
	struct qat_device_info *qat_dev_instance =
			&qat_pci_devs[qat_pci_dev->qat_dev_id];
	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
		.private_data_size = sizeof(struct qat_cryptodev_private)
	};
	struct qat_capabilities_info capa_info;
	const struct rte_cryptodev_capabilities *capabilities;
	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	uint64_t capa_size;
	int i = 0;

	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
			qat_pci_dev->name, "asym");
	QAT_LOG(DEBUG, "Creating QAT ASYM device %s", name);

	if (gen_dev_ops->cryptodev_ops == NULL) {
		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
				name);
		return -EFAULT;
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		qat_pci_dev->qat_asym_driver_id =
				qat_asym_driver_id;
	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		if (qat_pci_dev->qat_asym_driver_id !=
				qat_asym_driver_id) {
			QAT_LOG(ERR,
				"Device %s has a different driver id than the corresponding device in the primary process",
				name);
			return -EFAULT;
		}
	}

	/* Populate subset device to use in cryptodev device creation */
	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
	qat_dev_instance->asym_rte_dev.numa_node =
			qat_dev_instance->pci_dev->device.numa_node;
	qat_dev_instance->asym_rte_dev.devargs = NULL;

	cryptodev = rte_cryptodev_pmd_create(name,
			&(qat_dev_instance->asym_rte_dev), &init_params);

	if (cryptodev == NULL)
		return -ENODEV;

	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
	cryptodev->driver_id = qat_asym_driver_id;
	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;

	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;

	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
			"QAT_ASYM_CAPA_GEN_%d",
			qat_pci_dev->qat_dev_gen);

	internals = cryptodev->data->dev_private;
	internals->qat_dev = qat_pci_dev;
	internals->dev_id = cryptodev->data->dev_id;

	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
	capabilities = capa_info.data;
	capa_size = capa_info.size;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				capa_size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities, "
				"destroying PMD for %s",
				name);
			rte_cryptodev_pmd_destroy(cryptodev);
			memset(&qat_dev_instance->asym_rte_dev, 0,
				sizeof(qat_dev_instance->asym_rte_dev));
			return -EFAULT;
		}
	}

	memcpy(internals->capa_mz->addr, capabilities, capa_size);
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	while (1) {
		if (qat_dev_cmd_param[i].name == NULL)
			break;
		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
			internals->min_enq_burst_threshold =
					qat_dev_cmd_param[i].val;
		i++;
	}

	qat_pci_dev->asym_dev = internals;
	internals->service_type = QAT_SERVICE_ASYMMETRIC;
	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
			cryptodev->data->name, internals->dev_id);
	return 0;
}

int
qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
{
	struct rte_cryptodev *cryptodev;

	if (qat_pci_dev == NULL)
		return -ENODEV;
	if (qat_pci_dev->asym_dev == NULL)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);

	/* free crypto device */
	cryptodev = rte_cryptodev_pmd_get_dev(
			qat_pci_dev->asym_dev->dev_id);
	rte_cryptodev_pmd_destroy(cryptodev);
	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
	qat_pci_dev->asym_dev = NULL;

	return 0;
}

static struct cryptodev_driver qat_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
		cryptodev_qat_asym_driver,
		qat_asym_driver_id);