/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <rte_cryptodev_pmd.h>

#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#include "qat_sym.h"
#include "qat_sym_pmd.h"
#include "qat_sym_session.h"
#include "qat_qp.h"

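/*
 * Per queue pair raw data-path context: caches the session plus shadow
 * TX tail / RX head values and the number of requests enqueued or
 * responses dequeued since the last doorbell/head update.
 */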
struct qat_sym_dp_ctx {
	struct qat_sym_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

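/*
 * Fill the request's source/destination fields from the data vector(s).
 * A single segment is referenced directly; multiple segments are written
 * into the SGL of the op cookie belonging to this ring slot.
 * Returns the total data length, or -1 on invalid input.
 */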
static __rte_always_inline int32_t
qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
		struct rte_crypto_vec *data, uint16_t n_data_vecs)
{
	struct qat_queue *tx_queue;
	struct qat_sym_op_cookie *cookie;
	struct qat_sgl *list;
	uint32_t i;
	uint32_t total_len;

	if (likely(n_data_vecs == 1)) {
		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			data[0].iova;
		req->comn_mid.src_length = req->comn_mid.dst_length =
			data[0].len;
		return data[0].len;
	}

	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
		return -1;

	total_len = 0;
	tx_queue = &qp->tx_q;

	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
			QAT_COMN_PTR_TYPE_SGL);
	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
	list = (struct qat_sgl *)&cookie->qat_sgl_src;

	for (i = 0; i < n_data_vecs; i++) {
		list->buffers[i].len = data[i].len;
		list->buffers[i].resrvd = 0;
		list->buffers[i].addr = data[i].iova;
		/* widen to 64-bit so the length overflow check cannot wrap */
		if (unlikely((uint64_t)total_len + data[i].len > UINT32_MAX)) {
			QAT_DP_LOG(ERR, "Message too long");
			return -1;
		}
		total_len += data[i].len;
	}

	list->num_bufs = i;
	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			cookie->qat_sgl_src_phys_addr;
	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
	return total_len;
}

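/* Set the cipher IV inline in the request, or by pointer if it is too big. */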
static __rte_always_inline void
set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
				iv_len);
	else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
	}
}

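/* True when the firmware response reports a successful crypto status. */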
#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))

static __rte_always_inline void
qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		sta[i] = status;
}

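/*
 * Number of requests that may still be enqueued on queue pair q, given c
 * requests already cached locally, capped at the n requested.
 */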
#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)

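/*
 * Fill one AEAD request: IV, AAD (including CCM B0 block construction),
 * digest address and the cipher/auth region derived from the offsets.
 */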
static __rte_always_inline void
enqueue_one_aead_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
		(void *)&req->serv_specif_rqpars;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(void *)((uint8_t *)&req->serv_specif_rqpars +
		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
	uint8_t *aad_data;
	uint8_t aad_ccm_real_len;
	uint8_t aad_len_field_sz;
	uint32_t msg_len_be;
	rte_iova_t aad_iova = 0;
	uint8_t q;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
				ctx->cipher_iv.length);
		aad_iova = aad->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		aad_data = aad->va;
		aad_iova = aad->iova;
		aad_ccm_real_len = 0;
		aad_len_field_sz = 0;
		msg_len_be = rte_bswap32((uint32_t)data_len -
				ofs.ofs.cipher.head);

		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
			aad_ccm_real_len = ctx->aad_len -
				ICP_QAT_HW_CCM_AAD_B0_LEN -
				ICP_QAT_HW_CCM_AAD_LEN_INFO;
		} else {
			aad_data = iv->va;
			aad_iova = iv->iova;
		}

		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
			aad_len_field_sz, ctx->digest_length, q);
		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
				(uint8_t *)&msg_len_be,
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
		} else {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
				(uint8_t *)&msg_len_be +
				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
				- q), q);
		}

		if (aad_len_field_sz > 0) {
			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
				rte_bswap16(aad_ccm_real_len);

			if ((aad_ccm_real_len + aad_len_field_sz)
				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
				uint8_t pad_len = 0;
				uint8_t pad_idx = 0;

				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
					((aad_ccm_real_len +
					aad_len_field_sz) %
					ICP_QAT_HW_CCM_AAD_B0_LEN);
				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
					aad_ccm_real_len +
					aad_len_field_sz;
				memset(&aad_data[pad_idx], 0, pad_len);
			}
		}

		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va +
			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
			q - ICP_QAT_HW_CCM_NONCE_OFFSET;

		rte_memcpy((uint8_t *)aad->va +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
			ctx->cipher_iv.length);
		break;
	default:
		break;
	}

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_param->auth_off = ofs.ofs.cipher.head;
	auth_param->auth_len = cipher_param->cipher_length;
	auth_param->auth_res_addr = digest->iova;
	auth_param->u1.aad_adr = aad_iova;

	if (ctx->is_single_pass) {
		cipher_param->spc_aad_addr = aad_iova;
		cipher_param->spc_auth_res_addr = digest->iova;
	}
}

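/* Build and cache a single AEAD request on the TX ring. */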
static __rte_always_inline int
qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

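/* Build and cache a burst of AEAD requests; returns the number accepted. */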
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
			&vec->aad[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

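/* Fill the cipher IV, offset and length of a cipher-only request. */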
static __rte_always_inline void
enqueue_one_cipher_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;

	cipher_param = (void *)&req->serv_specif_rqpars;

	/* cipher IV */
	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
}

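/* Build and cache a single cipher-only request on the TX ring. */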
static __rte_always_inline int
qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest __rte_unused,
	struct rte_crypto_va_iova_ptr *aad __rte_unused,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

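/* Build and cache a burst of cipher-only requests. */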
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
			(uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

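/* Fill the auth region, digest address and auth IV of an auth-only request. */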
static __rte_always_inline void
enqueue_one_auth_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
				ctx->auth_iv.length);
		break;
	default:
		break;
	}
}

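/* Build and cache a single auth-only request on the TX ring. */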
static __rte_always_inline int
qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

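/* Build and cache a burst of auth-only requests. */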
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_auth_job(ctx, req, &vec->digest[i],
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

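/*
 * Fill one chained cipher + auth request and detect the digest-encrypted
 * case (digest placed right after the authenticated region inside the
 * ciphered data). Returns -1 if the offsets do not fit data_len or the
 * digest position cannot be resolved.
 */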
static __rte_always_inline int
enqueue_one_chain_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_vec *data,
	uint16_t n_data_vecs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	rte_iova_t auth_iova_end;
	int32_t cipher_len, auth_len;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	cipher_len = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

	if (unlikely(cipher_len < 0 || auth_len < 0))
		return -1;

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = cipher_len;
	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = auth_len;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;

		if (unlikely(n_data_vecs > 1)) {
			int auth_end_get = 0, i = n_data_vecs - 1;
			struct rte_crypto_vec *cvec = &data[0];
			uint32_t len;

			len = data_len - ofs.ofs.auth.tail;

			while (i >= 0 && len > 0) {
				if (cvec->len >= len) {
					/* auth region ends "len" bytes into
					 * this segment
					 */
					auth_iova_end = cvec->iova + len;
					len = 0;
					auth_end_get = 1;
					break;
				}
				len -= cvec->len;
				i--;
				cvec++;
			}

			if (unlikely(auth_end_get == 0))
				return -1;
		} else
			auth_iova_end = data[0].iova + auth_param->auth_off +
				auth_param->auth_len;

		/* Then check if digest-encrypted conditions are met */
		if ((auth_param->auth_off + auth_param->auth_len <
			cipher_param->cipher_offset +
			cipher_param->cipher_length) &&
			(digest->iova == auth_iova_end)) {
			/* Handle partial digest encryption */
			if (cipher_param->cipher_offset +
					cipher_param->cipher_length <
					auth_param->auth_off +
					auth_param->auth_len +
					ctx->digest_length)
				req->comn_mid.dst_length =
					req->comn_mid.src_length =
					auth_param->auth_off +
					auth_param->auth_len +
					ctx->digest_length;
			struct icp_qat_fw_comn_req_hdr *header =
				&req->comn_hdr;
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
		}
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		break;
	default:
		break;
	}

	return 0;
}

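/* Build and cache a single chained cipher + auth request on the TX ring. */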
static __rte_always_inline int
qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
		return -1;

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

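/* Build and cache a burst of chained cipher + auth requests. */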
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
			vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
			break;

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

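/*
 * Dequeue a burst of responses. The expected count is taken from the
 * first response's opaque data via get_dequeue_count(); every dequeued
 * job is reported through post_dequeue().
 */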
static __rte_always_inline uint32_t
qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *return_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct icp_qat_fw_comn_resp *resp;
	void *resp_opaque;
	uint32_t i, n, inflight;
	uint32_t head;
	uint8_t status;

	*n_success_jobs = 0;
	*return_status = 0;
	head = dp_ctx->head;

	inflight = qp->enqueued - qp->dequeued;
	if (unlikely(inflight == 0))
		return 0;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			head);
	/* no operation ready */
	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return 0;

	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
	/* get the dequeue count */
	n = get_dequeue_count(resp_opaque);
	if (unlikely(n == 0))
		return 0;

	out_user_data[0] = resp_opaque;
	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
	post_dequeue(resp_opaque, 0, status);
	*n_success_jobs += status;

	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;

	/* we already finished dequeue when n == 1 */
	if (unlikely(n == 1)) {
		i = 1;
		goto end_deq;
	}

	if (is_user_data_array) {
		for (i = 1; i < n; i++) {
			resp = (struct icp_qat_fw_comn_resp *)(
				(uint8_t *)rx_queue->base_addr + head);
			if (unlikely(*(uint32_t *)resp ==
					ADF_RING_EMPTY_SIG))
				goto end_deq;
			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
			*n_success_jobs += status;
			post_dequeue(out_user_data[i], i, status);
			head = (head + rx_queue->msg_size) &
					rx_queue->modulo_mask;
		}

		goto end_deq;
	}

	/* opaque is not array */
	for (i = 1; i < n; i++) {
		resp = (struct icp_qat_fw_comn_resp *)(
			(uint8_t *)rx_queue->base_addr + head);
		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
			goto end_deq;
		head = (head + rx_queue->msg_size) &
				rx_queue->modulo_mask;
		post_dequeue(resp_opaque, i, status);
		*n_success_jobs += status;
	}

end_deq:
	dp_ctx->head = head;
	dp_ctx->cached_dequeue += i;
	return i;
}

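/* Dequeue a single response, if one is ready, and report its status. */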
static __rte_always_inline void *
qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	register struct icp_qat_fw_comn_resp *resp;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			dp_ctx->head);

	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return NULL;

	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
			rx_queue->modulo_mask;
	dp_ctx->cached_dequeue++;

	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
			RTE_CRYPTO_OP_STATUS_SUCCESS :
			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	*dequeue_status = 0;
	return (void *)(uintptr_t)resp->opaque_data;
}

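/*
 * Commit the n cached enqueue operations: publish the shadow tail and
 * ring the TX doorbell.
 */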
static __rte_always_inline int
qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_enqueue != n))
		return -1;

	qp->enqueued += n;
	qp->stats.enqueued_count += n;

	tx_queue->tail = dp_ctx->tail;

	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
			tx_queue->hw_bundle_number,
			tx_queue->hw_queue_number, tx_queue->tail);
	tx_queue->csr_tail = tx_queue->tail;
	dp_ctx->cached_enqueue = 0;

	return 0;
}

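/*
 * Acknowledge the n cached dequeue operations: advance the RX head,
 * re-mark the consumed descriptors as empty and update the head CSR once
 * enough responses have been processed.
 */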
static __rte_always_inline int
qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_dequeue != n))
		return -1;

	rx_queue->head = dp_ctx->head;
	rx_queue->nb_processed_responses += n;
	qp->dequeued += n;
	qp->stats.dequeued_count += n;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
		uint32_t old_head, new_head;
		uint32_t max_head;

		old_head = rx_queue->csr_head;
		new_head = rx_queue->head;
		max_head = qp->nb_descriptors * rx_queue->msg_size;

		/* write out free descriptors */
		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

		if (new_head < old_head) {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
					max_head - old_head);
			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
					new_head);
		} else {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
					old_head);
		}
		rx_queue->nb_processed_responses = 0;
		rx_queue->csr_head = new_head;

		/* write current head to CSR */
		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
			new_head);
	}

	dp_ctx->cached_dequeue = 0;
	return 0;
}

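/*
 * Configure the raw data-path context of a queue pair: bind the session
 * and select enqueue/dequeue handlers according to the session's command
 * and algorithms.
 */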
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	struct qat_qp *qp;
	struct qat_sym_session *ctx;
	struct qat_sym_dp_ctx *dp_ctx;

	qp = dev->data->queue_pairs[qp_id];
	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
				sizeof(struct qat_sym_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
		dp_ctx->tail = qp->tx_q.tail;
		dp_ctx->head = qp->rx_q.head;
		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
	}

	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
		return -EINVAL;

	ctx = (struct qat_sym_session *)get_sym_session_private_data(
			session_ctx.crypto_sess, qat_sym_driver_id);

	dp_ctx->session = ctx;

	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;

	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_chain_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_cipher_jobs;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
	} else
		return -1;

	return 0;
}

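/* Size of the driver-private raw data-path context. */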
int
qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct qat_sym_dp_ctx);
}