/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_udp.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_cryptodev_sec.h"
#include "otx2_security.h"

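/*
 * Precompute the fixed per-packet length overheads for the SA: tunnel
 * header, ESP/AH header, optional UDP encapsulation, IV and ICV sizes,
 * plus the pad alignment used when rounding up the payload. Doing this
 * once at session setup keeps the arithmetic off the datapath.
 */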
static int
ipsec_lp_len_precalc(struct rte_security_ipsec_xform *ipsec,
		     struct rte_crypto_sym_xform *xform,
		     struct otx2_sec_session_ipsec_lp *lp)
{
	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;

	if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
		lp->partial_len = sizeof(struct rte_ipv4_hdr);
	else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
		lp->partial_len = sizeof(struct rte_ipv6_hdr);
	else
		return -EINVAL;

	if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
		lp->partial_len += sizeof(struct rte_esp_hdr);
		lp->roundup_len = sizeof(struct rte_esp_tail);
	} else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
		lp->partial_len += OTX2_SEC_AH_HDR_LEN;
	} else {
		return -EINVAL;
	}

	if (ipsec->options.udp_encap)
		lp->partial_len += sizeof(struct rte_udp_hdr);

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			lp->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
			lp->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
			lp->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
			return 0;
		} else {
			return -EINVAL;
		}
	}

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = xform;
		auth_xform = xform->next;
	} else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		auth_xform = xform;
		cipher_xform = xform->next;
	} else {
		return -EINVAL;
	}

	if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
		lp->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
		lp->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
	} else {
		return -EINVAL;
	}

	if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
		lp->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
	else
		return -EINVAL;

	return 0;
}

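/*
 * Synchronously write the SA context to CPT hardware: build a single
 * CPT instruction pointing at the SA, submit it over the LMT line, and
 * poll the result memory until the hardware updates the completion
 * code or the timeout expires.
 */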
static int
otx2_cpt_enq_sa_write(struct otx2_sec_session_ipsec_lp *lp,
		      struct otx2_cpt_qp *qptr, uint8_t opcode)
{
	uint64_t lmt_status, time_out;
	void *lmtline = qptr->lmtline;
	struct otx2_cpt_inst_s inst;
	struct otx2_cpt_res *res;
	uint64_t *mdata;
	int ret = 0;

	if (unlikely(rte_mempool_get(qptr->meta_info.pool,
				     (void **)&mdata) < 0))
		return -ENOMEM;

	res = (struct otx2_cpt_res *)RTE_PTR_ALIGN(mdata, 16);
	res->compcode = CPT_9X_COMP_E_NOTDONE;

	inst.opcode = opcode | (lp->ctx_len << 8);
	inst.param1 = 0;
	inst.param2 = 0;
	inst.dlen = lp->ctx_len << 3;
	inst.dptr = rte_mempool_virt2iova(lp);
	inst.rptr = 0;
	inst.cptr = rte_mempool_virt2iova(lp);
	inst.egrp = OTX2_CPT_EGRP_SE;

	inst.u64[0] = 0;
	inst.u64[2] = 0;
	inst.u64[3] = 0;
	inst.res_addr = rte_mempool_virt2iova(res);

	rte_io_wmb();

	do {
		/* Copy CPT command to LMTLINE */
		otx2_lmt_mov(lmtline, &inst, 2);
		lmt_status = otx2_lmt_submit(qptr->lf_nq_reg);
	} while (lmt_status == 0);

	time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

	while (res->compcode == CPT_9X_COMP_E_NOTDONE) {
		if (rte_get_timer_cycles() > time_out) {
			rte_mempool_put(qptr->meta_info.pool, mdata);
			otx2_err("Request timed out");
			return -ETIMEDOUT;
		}
		rte_io_rmb();
	}

	if (unlikely(res->compcode != CPT_9X_COMP_E_GOOD)) {
		ret = res->compcode;
		switch (ret) {
		case CPT_9X_COMP_E_INSTERR:
			otx2_err("Request failed with instruction error");
			break;
		case CPT_9X_COMP_E_FAULT:
			otx2_err("Request failed with DMA fault");
			break;
		case CPT_9X_COMP_E_HWERR:
			otx2_err("Request failed with hardware error");
			break;
		default:
			otx2_err("Request failed with unknown hardware "
				 "completion code : 0x%x", ret);
		}
		goto mempool_put;
	}

	if (unlikely(res->uc_compcode != OTX2_IPSEC_PO_CC_SUCCESS)) {
		ret = res->uc_compcode;
		switch (ret) {
		case OTX2_IPSEC_PO_CC_AUTH_UNSUPPORTED:
			otx2_err("Invalid auth type");
			break;
		case OTX2_IPSEC_PO_CC_ENCRYPT_UNSUPPORTED:
			otx2_err("Invalid encrypt type");
			break;
		default:
			otx2_err("Request failed with unknown microcode "
				 "completion code : 0x%x", ret);
		}
	}

mempool_put:
	rte_mempool_put(qptr->meta_info.pool, mdata);
	return ret;
}

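/*
 * Cache the IV/AAD/digest layout from the crypto transforms in the
 * session so the datapath can build instructions without walking the
 * xform chain again.
 */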
static void
set_session_misc_attributes(struct otx2_sec_session_ipsec_lp *sess,
			    struct rte_crypto_sym_xform *crypto_xform,
			    struct rte_crypto_sym_xform *auth_xform,
			    struct rte_crypto_sym_xform *cipher_xform)
{
	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		sess->iv_offset = crypto_xform->aead.iv.offset;
		sess->iv_length = crypto_xform->aead.iv.length;
		sess->aad_length = crypto_xform->aead.aad_length;
		sess->mac_len = crypto_xform->aead.digest_length;
	} else {
		sess->iv_offset = cipher_xform->cipher.iv.offset;
		sess->iv_length = cipher_xform->cipher.iv.length;
		sess->auth_iv_offset = auth_xform->auth.iv.offset;
		sess->auth_iv_length = auth_xform->auth.iv.length;
		sess->mac_len = auth_xform->auth.digest_length;
	}

	sess->ucmd_param1 = OTX2_IPSEC_PO_PER_PKT_IV;
	sess->ucmd_param2 = 0;
}

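/*
 * Create an outbound (egress) lookaside IPsec session: validate and
 * populate the outbound SA (control word, tunnel header template,
 * keys), precompute the length overheads, then flush the SA to CPT
 * hardware with a WRITE_IPSEC_OUTB command.
 */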
static int
crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
				     struct rte_security_ipsec_xform *ipsec,
				     struct rte_crypto_sym_xform *crypto_xform,
				     struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	const uint8_t *cipher_key, *auth_key;
	struct otx2_sec_session_ipsec_lp *lp;
	struct otx2_ipsec_po_sa_ctl *ctl;
	int cipher_key_len, auth_key_len;
	struct otx2_ipsec_po_out_sa *sa;
	struct otx2_sec_session *sess;
	struct otx2_cpt_inst_s inst;
	struct rte_ipv6_hdr *ip6;
	struct rte_ipv4_hdr *ip;
	int ret;

	sess = get_sec_session_private_data(sec_sess);
	lp = &sess->ipsec.lp;

	sa = &lp->out_sa;
	ctl = &sa->ctl;
	if (ctl->valid) {
		otx2_err("SA already registered");
		return -EINVAL;
	}

	memset(sa, 0, sizeof(struct otx2_ipsec_po_out_sa));

	/* Initialize lookaside ipsec private data */
	lp->ip_id = 0;
	lp->seq_lo = 1;
	lp->seq_hi = 0;
	lp->tunnel_type = ipsec->tunnel.type;

	ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret)
		return ret;

	ret = ipsec_lp_len_precalc(ipsec, crypto_xform, lp);
	if (ret)
		return ret;

	memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);

	if (ipsec->options.udp_encap) {
		sa->udp_src = 4500;
		sa->udp_dst = 4500;
	}

	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		/* Start ip id from 1 */
		lp->ip_id = 1;

		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			ip = &sa->template.ipv4_hdr;
			ip->version_ihl = RTE_IPV4_VHL_DEF;
			ip->next_proto_id = IPPROTO_ESP;
			ip->time_to_live = ipsec->tunnel.ipv4.ttl;
			ip->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
			if (ipsec->tunnel.ipv4.df)
				ip->fragment_offset = BIT(14);
			memcpy(&ip->src_addr, &ipsec->tunnel.ipv4.src_ip,
			       sizeof(struct in_addr));
			memcpy(&ip->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
			       sizeof(struct in_addr));
		} else if (ipsec->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			ip6 = &sa->template.ipv6_hdr;
			ip6->vtc_flow = rte_cpu_to_be_32(0x60000000 |
				((ipsec->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
			ip6->proto = (ipsec->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
			       sizeof(struct in6_addr));
			memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
			       sizeof(struct in6_addr));
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	cipher_xform = crypto_xform;
	auth_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;

		lp->ctx_len = sizeof(struct otx2_ipsec_po_out_sa);
		lp->ctx_len >>= 3;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_OUTB_CTX_LEN);
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;

		/* TODO: check the ctx len for supporting ALGO */
		lp->ctx_len = sizeof(struct otx2_ipsec_po_out_sa) >> 3;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_MAX_OUTB_CTX_LEN);
	}

	if (cipher_key_len != 0)
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	else
		return -EINVAL;

	/* Use OPAD & IPAD */
	RTE_SET_USED(auth_key);
	RTE_SET_USED(auth_key_len);

	inst.u64[7] = 0;
	inst.egrp = OTX2_CPT_EGRP_SE;
	inst.cptr = rte_mempool_virt2iova(sa);

	lp->cpt_inst_w7 = inst.u64[7];
	lp->ucmd_opcode = (lp->ctx_len << 8) |
				(OTX2_IPSEC_PO_PROCESS_IPSEC_OUTB);

	set_session_misc_attributes(lp, crypto_xform,
				    auth_xform, cipher_xform);

	return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
				     OTX2_IPSEC_PO_WRITE_IPSEC_OUTB);
}

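/*
 * Create an inbound (ingress) lookaside IPsec session: the mirror of
 * the outbound path, but without a tunnel header template, using the
 * inbound SA layout and a WRITE_IPSEC_INB command.
 */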
static int
crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
				    struct rte_security_ipsec_xform *ipsec,
				    struct rte_crypto_sym_xform *crypto_xform,
				    struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	struct otx2_sec_session_ipsec_lp *lp;
	struct otx2_ipsec_po_sa_ctl *ctl;
	const uint8_t *cipher_key, *auth_key;
	int cipher_key_len, auth_key_len;
	struct otx2_ipsec_po_in_sa *sa;
	struct otx2_sec_session *sess;
	struct otx2_cpt_inst_s inst;
	int ret;

	sess = get_sec_session_private_data(sec_sess);
	lp = &sess->ipsec.lp;

	sa = &lp->in_sa;
	ctl = &sa->ctl;

	if (ctl->valid) {
		otx2_err("SA already registered");
		return -EINVAL;
	}

	memset(sa, 0, sizeof(struct otx2_ipsec_po_in_sa));

	ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret)
		return ret;

	lp->tunnel_type = ipsec->tunnel.type;
	auth_xform = crypto_xform;
	cipher_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
			memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;

		lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
				       aes_gcm.hmac_key[0]) >> 3;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_INB_CTX_LEN);
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;

		/* TODO: check the ctx len for supporting ALGO */
		lp->ctx_len = sizeof(struct otx2_ipsec_po_in_sa) >> 2;
		RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_MAX_INB_CTX_LEN);
	}

	if (cipher_key_len != 0)
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	else
		return -EINVAL;

	/* Use OPAD & IPAD */
	RTE_SET_USED(auth_key);
	RTE_SET_USED(auth_key_len);

	inst.u64[7] = 0;
	inst.egrp = OTX2_CPT_EGRP_SE;
	inst.cptr = rte_mempool_virt2iova(sa);

	lp->cpt_inst_w7 = inst.u64[7];
	lp->ucmd_opcode = (lp->ctx_len << 8) |
				(OTX2_IPSEC_PO_PROCESS_IPSEC_INB);

	set_session_misc_attributes(lp, crypto_xform,
				    auth_xform, cipher_xform);

	return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
				     OTX2_IPSEC_PO_WRITE_IPSEC_INB);
}

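/*
 * Common entry point for lookaside IPsec session creation: verify the
 * xform chain, then dispatch to the inbound or outbound path.
 */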
static int
crypto_sec_ipsec_session_create(struct rte_cryptodev *crypto_dev,
				struct rte_security_ipsec_xform *ipsec,
				struct rte_crypto_sym_xform *crypto_xform,
				struct rte_security_session *sess)
{
	int ret;

	if (crypto_dev->data->queue_pairs[0] == NULL) {
		otx2_err("Setup cpt queue pair before creating sec session");
		return -EPERM;
	}

	ret = ipsec_po_xform_verify(ipsec, crypto_xform);
	if (ret)
		return ret;

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		return crypto_sec_ipsec_inb_session_create(crypto_dev, ipsec,
							   crypto_xform, sess);
	else
		return crypto_sec_ipsec_outb_session_create(crypto_dev, ipsec,
							    crypto_xform, sess);
}

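/*
 * rte_security session_create callback: allocate the session private
 * data from the caller's mempool, register the security dynfield, and
 * hand off to the protocol-specific setup. On failure the private data
 * is returned to the pool.
 */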
static int
otx2_crypto_sec_session_create(void *device,
			       struct rte_security_session_conf *conf,
			       struct rte_security_session *sess,
			       struct rte_mempool *mempool)
{
	struct otx2_sec_session *priv;
	int ret;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -rte_errno;

	if (rte_mempool_get(mempool, (void **)&priv)) {
		otx2_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	set_sec_session_private_data(sess, priv);

	priv->userdata = conf->userdata;

	if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
		ret = crypto_sec_ipsec_session_create(device, &conf->ipsec,
						      conf->crypto_xform,
						      sess);
	else
		ret = -ENOTSUP;

	if (ret)
		goto mempool_put;

	return 0;

mempool_put:
	rte_mempool_put(mempool, priv);
	set_sec_session_private_data(sess, NULL);
	return ret;
}

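/*
 * rte_security session_destroy callback: return the private data to
 * the mempool it was allocated from and clear the session pointer.
 */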
static int
otx2_crypto_sec_session_destroy(void *device __rte_unused,
				struct rte_security_session *sess)
{
	struct otx2_sec_session *priv;
	struct rte_mempool *sess_mp;

	priv = get_sec_session_private_data(sess);

	if (priv == NULL)
		return 0;

	sess_mp = rte_mempool_from_obj(priv);

	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(sess_mp, priv);

	return 0;
}

static unsigned int
otx2_crypto_sec_session_get_size(void *device __rte_unused)
{
	return sizeof(struct otx2_sec_session);
}

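/*
 * Attach the security session to the mbuf via the security dynfield so
 * the datapath can retrieve it per packet.
 */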
static int
otx2_crypto_sec_set_pkt_mdata(void *device __rte_unused,
			      struct rte_security_session *session,
			      struct rte_mbuf *m, void *params __rte_unused)
{
	/* Set security session as the pkt metadata */
	*rte_security_dynfield(m) = (rte_security_dynfield_t)session;

	return 0;
}

static int
otx2_crypto_sec_get_userdata(void *device __rte_unused, uint64_t md,
			     void **userdata)
{
	/* Retrieve userdata */
	*userdata = (void *)md;

	return 0;
}

static struct rte_security_ops otx2_crypto_sec_ops = {
	.session_create = otx2_crypto_sec_session_create,
	.session_destroy = otx2_crypto_sec_session_destroy,
	.session_get_size = otx2_crypto_sec_session_get_size,
	.set_pkt_metadata = otx2_crypto_sec_set_pkt_mdata,
	.get_userdata = otx2_crypto_sec_get_userdata,
	.capabilities_get = otx2_crypto_sec_capabilities_get
};

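/*
 * Allocate the rte_security context for the crypto device and wire in
 * the ops table above.
 */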
int
otx2_crypto_sec_ctx_create(struct rte_cryptodev *cdev)
{
	struct rte_security_ctx *ctx;

	ctx = rte_malloc("otx2_cpt_dev_sec_ctx",
			 sizeof(struct rte_security_ctx), 0);

	if (ctx == NULL)
		return -ENOMEM;

	/* Populate ctx */
	ctx->device = cdev;
	ctx->ops = &otx2_crypto_sec_ops;
	ctx->sess_cnt = 0;

	cdev->security_ctx = ctx;

	return 0;
}

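/* Free the security context allocated in otx2_crypto_sec_ctx_create() */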
void
otx2_crypto_sec_ctx_destroy(struct rte_cryptodev *cdev)
{
	rte_free(cdev->security_ctx);
}