/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_security.h>
#include <rte_security_driver.h>

#include <cn10k_ethdev.h>
#include <cnxk_security.h>
#include <roc_priv.h>

static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 20,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

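/*
 * Each IPsec security capability below points at the array above via
 * crypto_capabilities, so the per-algorithm limits are shared across
 * all four ESP tunnel/transport, ingress/egress combinations.
 */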
static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
	{	/* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Transport Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Transport Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};

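/*
 * Free an mbuf chain back to its NPA aura directly, bypassing the
 * mempool per-lcore cache. This is presumably required because the
 * callers below can run from an SSO work callback rather than a
 * regular EAL worker context.
 */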
static inline void
cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
{
	struct rte_mbuf *next;

	if (!mbuf)
		return;
	do {
		next = mbuf->next;
		roc_npa_aura_op_free(mbuf->pool->pool_id, 1, (rte_iova_t)mbuf);
		mbuf = next;
	} while (mbuf != NULL);
}

void
cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
{
	struct rte_eth_event_ipsec_desc desc;
	struct cn10k_sec_sess_priv sess_priv;
	struct cn10k_outb_priv_data *priv;
	struct roc_ot_ipsec_outb_sa *sa;
	struct cpt_cn10k_res_s *res;
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	static uint64_t warn_cnt;
	uint16_t dlen_adj, rlen;
	struct rte_mbuf *mbuf;
	uintptr_t sa_base;
	uintptr_t nixtx;
	uint8_t port;

	RTE_SET_USED(args);

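	/*
	 * Decode the SSO get-work word pair: as extracted below, gw[0]
	 * bits [31:28] hold the event type, bits [27:20] the event
	 * subtype and bits [7:0] the ethdev port, while gw[1] carries
	 * the work pointer (mbuf or SA).
	 */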
	switch ((gw[0] >> 28) & 0xF) {
	case RTE_EVENT_TYPE_ETHDEV:
		/* Event from inbound inline dev due to IPsec packet with bad L4 */
		mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
		plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
		cnxk_pktmbuf_free_no_cache(mbuf);
		return;
	case RTE_EVENT_TYPE_CPU:
		/* Check for subtype */
		if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
			/* Event from outbound inline error */
			mbuf = (struct rte_mbuf *)gw[1];
			break;
		}
		/* Fall through */
	default:
		if (soft_exp_event & 0x1) {
			sa = (struct roc_ot_ipsec_outb_sa *)args;
			priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
			desc.metadata = (uint64_t)priv->userdata;
			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY;
			eth_dev = &rte_eth_devices[soft_exp_event >> 8];
			rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_IPSEC, &desc);
		} else {
			plt_err("Unknown event gw[0] = 0x%" PRIx64 ", gw[1] = 0x%" PRIx64,
				gw[0], gw[1]);
		}
		return;
	}

	/* Get ethdev port from tag */
	port = gw[0] & 0xFF;
	eth_dev = &rte_eth_devices[port];
	dev = cnxk_eth_pmd_priv(eth_dev);

	sess_priv.u64 = *rte_security_dynfield(mbuf);
	/* Calculate dlen adj */
	dlen_adj = mbuf->pkt_len - mbuf->l2_len;
	rlen = (dlen_adj + sess_priv.roundup_len) +
	       (sess_priv.roundup_byte - 1);
	rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
	rlen += sess_priv.partial_len;
	dlen_adj = rlen - dlen_adj;
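	/*
	 * At this point rlen is the expected post-encap length: the
	 * plain length plus roundup_len, rounded up to the roundup_byte
	 * boundary, plus the fixed partial_len overhead; dlen_adj now
	 * holds the number of bytes the packet grows by after IPsec
	 * processing.
	 */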

	/* Find the res area residing on next cacheline after end of data */
	nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
	nixtx += BIT_ULL(7);
	nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
	res = (struct cpt_cn10k_res_s *)nixtx;

	plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
		    mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);

	sa_base = dev->outb.sa_base;
	sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
	priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);

	memset(&desc, 0, sizeof(desc));

	switch (res->uc_compcode) {
	case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
		desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
		break;
	case ROC_IE_OT_UCC_ERR_PKT_IP:
		warn_cnt++;
		if (warn_cnt % 10000 == 0)
			plt_warn("Outbound error, bad ip pkt, mbuf %p,"
				 " sa_index %u (total warnings %" PRIu64 ")",
				 mbuf, sess_priv.sa_idx, warn_cnt);
		desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
		break;
	default:
		warn_cnt++;
		if (warn_cnt % 10000 == 0)
			plt_warn("Outbound error, mbuf %p, sa_index %u,"
				 " compcode %x uc %x"
				 " (total warnings %" PRIu64 ")",
				 mbuf, sess_priv.sa_idx, res->compcode,
				 res->uc_compcode, warn_cnt);
		desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
		break;
	}

	desc.metadata = (uint64_t)priv->userdata;
	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
	cnxk_pktmbuf_free_no_cache(mbuf);
}

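/*
 * Debug-only helper: overrides the SA IV with a fixed value parsed
 * from a comma-separated list of byte values (e.g. "0x1,0x2,..."),
 * taken from the CN10K_ETH_SEC_IV_OVR environment variable in session
 * create below, and switches the SA to source the IV from the SA
 * itself. A fixed IV defeats IPsec security guarantees, so this is
 * strictly a debug facility.
 */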
static void
outb_dbg_iv_update(struct roc_ot_ipsec_outb_sa *outb_sa, const char *__iv_str)
{
	uint8_t *iv_dbg = outb_sa->iv.iv_dbg;
	char *iv_str = strdup(__iv_str);
	char *iv_b = NULL;
	int len = 16;
	char *save;
	int i;

	if (!iv_str)
		return;

	if (outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_GCM ||
	    outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CTR ||
	    outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CCM ||
	    outb_sa->w2.s.auth_type == ROC_IE_OT_SA_AUTH_AES_GMAC) {
		memset(outb_sa->iv.s.iv_dbg1, 0, sizeof(outb_sa->iv.s.iv_dbg1));
		memset(outb_sa->iv.s.iv_dbg2, 0, sizeof(outb_sa->iv.s.iv_dbg2));

		iv_dbg = outb_sa->iv.s.iv_dbg1;
		for (i = 0; i < 4; i++) {
			iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
			if (!iv_b)
				break;
			iv_dbg[i] = strtoul(iv_b, NULL, 0);
		}
		*(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);

		iv_dbg = outb_sa->iv.s.iv_dbg2;
		for (i = 0; i < 4; i++) {
			iv_b = strtok_r(NULL, ",", &save);
			if (!iv_b)
				break;
			iv_dbg[i] = strtoul(iv_b, NULL, 0);
		}
		*(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);

	} else {
		iv_dbg = outb_sa->iv.iv_dbg;
		memset(iv_dbg, 0, sizeof(outb_sa->iv.iv_dbg));

		for (i = 0; i < len; i++) {
			iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
			if (!iv_b)
				break;
			iv_dbg[i] = strtoul(iv_b, NULL, 0);
		}
		*(uint64_t *)iv_dbg = rte_be_to_cpu_64(*(uint64_t *)iv_dbg);
		*(uint64_t *)&iv_dbg[8] =
			rte_be_to_cpu_64(*(uint64_t *)&iv_dbg[8]);
	}

	/* Update source of IV */
	outb_sa->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
	free(iv_str);
}

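/*
 * When a soft byte/packet lifetime is requested, point the SA's error
 * control block at the NIX soft-expiry ring so that expiry events can
 * be surfaced to the application (see the SA_TIME_EXPIRY handling in
 * the SSO work callback above).
 */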
static int
cn10k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix,
				struct roc_ot_ipsec_outb_sa *sa, void *sa_cptr,
				struct rte_security_ipsec_xform *ipsec_xfrm,
				uint32_t sa_idx)
{
	uint64_t *ring_base, ring_addr;

	if (ipsec_xfrm->life.bytes_soft_limit ||
	    ipsec_xfrm->life.packets_soft_limit) {
		ring_base = roc_nix_inl_outb_ring_base_get(roc_nix);
		if (ring_base == NULL)
			return -ENOTSUP;

		ring_addr = ring_base[sa_idx >>
				      ROC_NIX_SOFT_EXP_ERR_RING_MAX_ENTRY_LOG2];
		sa->ctx.err_ctl.s.mode = ROC_IE_OT_ERR_CTL_MODE_RING;
		sa->ctx.err_ctl.s.address = ring_addr >> 3;
		sa->w0.s.ctx_id = ((uintptr_t)sa_cptr >> 51) & 0x1ff;
	}

	return 0;
}

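/*
 * Create an inline IPsec session. Rough flow: validate that the
 * request is inline-protocol IPsec, take per-session private data from
 * the caller's mempool, build the SA in a scratch copy (sa_dptr) under
 * the per-direction lock, flush it to the hardware-owned SA area with
 * roc_nix_inl_ctx_write(), and finally stash the fast-path fields in
 * the session private data.
 */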
static int
cn10k_eth_sec_session_create(void *device,
			     struct rte_security_session_conf *conf,
			     struct rte_security_session *sess,
			     struct rte_mempool *mempool)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn10k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	struct roc_nix *nix = &dev->nix;
	bool inbound, inl_dev;
	rte_spinlock_t *lock;
	char tbuf[128] = {0};
	int rc = 0;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	if (conf->ipsec.options.ip_reassembly_en &&
			dev->reass_dynfield_off < 0) {
		if (rte_eth_ip_reassembly_dynfield_register(&dev->reass_dynfield_off,
					&dev->reass_dynflag_bit) < 0)
			return -rte_errno;
	}

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
	inl_dev = !!dev->inb.inl_dev;

	/* Search if a session already exists */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	if (rte_mempool_get(mempool, (void **)&eth_sec)) {
		plt_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	sess_priv.u64 = 0;

	lock = inbound ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	/* Acquire lock on inline dev for inbound */
	if (inbound && inl_dev)
		roc_nix_inl_dev_lock();

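	/*
	 * Inbound SAs live in a fixed table owned by the NIX or inline
	 * device and are addressed by SPI (masked to the configured
	 * range); outbound SAs are instead allocated from a
	 * driver-managed index space below.
	 */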
	if (inbound) {
		struct roc_ot_ipsec_inb_sa *inb_sa, *inb_sa_dptr;
		struct cn10k_inb_priv_data *inb_priv;
		uint32_t spi_mask;
		uintptr_t sa;

		PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);

		spi_mask = roc_nix_inl_inb_spi_range(nix, inl_dev, NULL, NULL);

		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
		sa = roc_nix_inl_inb_sa_get(nix, inl_dev, ipsec->spi);
		if (!sa && dev->inb.inl_dev) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa, inline dev "
				 "not found or spi not in range");
			rc = -ENOTSUP;
			goto mempool_put;
		} else if (!sa) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa");
			rc = -EFAULT;
			goto mempool_put;
		}

		inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;

		/* Check if SA is already in use */
		if (inb_sa->w2.s.valid) {
			snprintf(tbuf, sizeof(tbuf),
				 "Inbound SA with SPI %u already in use",
				 ipsec->spi);
			rc = -EBUSY;
			goto mempool_put;
		}

		inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
		memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
					       true);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init inbound sa, rc=%d", rc);
			goto mempool_put;
		}

		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;
		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		/* Save SA index/SPI in cookie for now */
		inb_sa_dptr->w1.s.cookie =
			rte_cpu_to_be_32(ipsec->spi & spi_mask);

		/* Prepare session priv */
		sess_priv.inb_sa = 1;
		sess_priv.sa_idx = ipsec->spi & spi_mask;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi & spi_mask;
		eth_sec->spi = ipsec->spi;
		eth_sec->inl_dev = !!dev->inb.inl_dev;
		eth_sec->inb = true;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess++;
		/* Sync session in context cache */
		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_inb_sa));
		if (rc)
			goto mempool_put;

		if (conf->ipsec.options.ip_reassembly_en) {
			inb_priv->reass_dynfield_off = dev->reass_dynfield_off;
			inb_priv->reass_dynflag_bit = dev->reass_dynflag_bit;
		}

	} else {
		struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr;
		struct cn10k_outb_priv_data *outb_priv;
		struct cnxk_ipsec_outb_rlens *rlens;
		uint64_t sa_base = dev->outb.sa_base;
		const char *iv_str;
		uint32_t sa_idx;

		PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
		if (rc)
			goto mempool_put;

		outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
		memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outbound sa, rc=%d", rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		iv_str = getenv("CN10K_ETH_SEC_IV_OVR");
		if (iv_str)
			outb_dbg_iv_update(outb_sa_dptr, iv_str);

		/* Fill outbound sa misc params */
		rc = cn10k_eth_sec_outb_sa_misc_fill(&dev->nix, outb_sa_dptr,
						     outb_sa, ipsec, sa_idx);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outb sa misc params, rc=%d",
				 rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;

		/* Save rlen info */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		/* Prepare session priv */
		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;
		sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
		sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess++;
		/* Sync session in context cache */
		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_outb_sa));
		if (rc)
			goto mempool_put;
	}
	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();
	rte_spinlock_unlock(lock);

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);
	/* Update fast path info in priv area */
	set_sec_session_private_data(sess, (void *)sess_priv.u64);

	return 0;
mempool_put:
	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();
	rte_spinlock_unlock(lock);

	rte_mempool_put(mempool, eth_sec);
	if (rc)
		plt_err("%s", tbuf);
	return rc;
}

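/*
 * Destroy an inline IPsec session: disable the SA by writing back a
 * freshly initialized context, unlink the session from the
 * per-direction list and, for outbound, return the SA index to the
 * free pool before releasing the session object to its mempool.
 */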
static int
cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_sec_sess *eth_sec;
	struct rte_mempool *mp;
	rte_spinlock_t *lock;
	void *sa_dptr;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	if (eth_sec->inl_dev)
		roc_nix_inl_dev_lock();

	if (eth_sec->inb) {
		/* Disable SA */
		sa_dptr = dev->inb.sa_dptr;
		roc_ot_ipsec_inb_sa_init(sa_dptr, true);

		roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
				      eth_sec->inb,
				      sizeof(struct roc_ot_ipsec_inb_sa));
		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess--;
	} else {
		/* Disable SA */
		sa_dptr = dev->outb.sa_dptr;
		roc_ot_ipsec_outb_sa_init(sa_dptr);

		roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
				      eth_sec->inb,
				      sizeof(struct roc_ot_ipsec_outb_sa));
		/* Release Outbound SA index */
		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess--;
	}
	if (eth_sec->inl_dev)
		roc_nix_inl_dev_unlock();

	rte_spinlock_unlock(lock);

	plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
		    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);

	/* Put eth_sec object back to pool */
	mp = rte_mempool_from_obj(eth_sec);
	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(mp, eth_sec);
	return 0;
}

static const struct rte_security_capability *
cn10k_eth_sec_capabilities_get(void *device __rte_unused)
{
	return cn10k_eth_sec_capabilities;
}

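/*
 * Plug the cn10k-specific handlers into the common cnxk security ops.
 * Expected to be invoked once during device probe; the init_once guard
 * makes repeated calls harmless.
 */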
void
cn10k_eth_sec_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
	cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
	cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;
}