/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_security.h>
#include <rte_security_driver.h>

#include <cn9k_ethdev.h>
#include <cnxk_security.h>

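/*
 * Crypto capabilities advertised for the CN9K inline IPsec offload.
 * This table is referenced by the security capabilities below and is
 * handed back to applications via the capabilities_get hook at the
 * end of this file.
 */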
static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 20,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

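/*
 * Inline protocol IPsec capabilities: tunnel mode ESP in both
 * directions, each entry pointing at the crypto table above.
 * RTE_SECURITY_TX_OLOAD_NEED_MDATA tells applications that egress
 * packets need session metadata attached (rte_security_set_pkt_metadata)
 * before transmission.
 */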
static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
	{	/* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};
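
/*
 * Illustrative only (not part of this driver): an application reaches
 * the hooks below through the generic rte_security API, roughly as:
 *
 *	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = { ... },
 *		.crypto_xform = &xform,
 *		.userdata = app_priv,
 *	};
 *	sess = rte_security_session_create(ctx, &conf, sess_pool, priv_pool);
 *
 * The conf values and pool names above are assumptions for the sketch.
 */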
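/*
 * Initialize the inbound anti-replay window state. Window sizes beyond
 * CNXK_ON_AR_WIN_SIZE_MAX are rejected outright rather than clamped.
 */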
static inline int
ar_window_init(struct cn9k_inb_priv_data *inb_priv)
{
	if (inb_priv->replay_win_sz > CNXK_ON_AR_WIN_SIZE_MAX) {
		plt_err("Replay window size:%u is not supported",
			inb_priv->replay_win_sz);
		return -ENOTSUP;
	}

	rte_spinlock_init(&inb_priv->ar.lock);
	/*
	 * Set window bottom to 1, base and top to size of
	 * window
	 */
	inb_priv->ar.winb = 1;
	inb_priv->ar.wint = inb_priv->replay_win_sz;
	inb_priv->ar.base = inb_priv->replay_win_sz;

	return 0;
}

static int
cn9k_eth_sec_session_create(void *device,
			    struct rte_security_session_conf *conf,
			    struct rte_security_session *sess,
			    struct rte_mempool *mempool)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn9k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	struct roc_nix *nix = &dev->nix;
	rte_spinlock_t *lock;
	char tbuf[128] = {0};
	bool inbound;
	int rc = 0;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);

	/* Search if a session already exists */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	if (rte_mempool_get(mempool, (void **)&eth_sec)) {
		plt_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	lock = inbound ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	sess_priv.u64 = 0;

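	/*
	 * Inbound SAs live in a fixed table indexed by (masked) SPI and are
	 * programmed in place; outbound SAs are carved out of the device SA
	 * area using a freshly allocated index.
	 */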
	if (inbound) {
		struct cn9k_inb_priv_data *inb_priv;
		struct roc_onf_ipsec_inb_sa *inb_sa;
		uint32_t spi_mask;

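		/* The driver's per-SA private data must fit in the SW
		 * reserved area ROC keeps alongside each ONF inbound SA.
		 */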
		PLT_STATIC_ASSERT(sizeof(struct cn9k_inb_priv_data) <
				  ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD);

		spi_mask = roc_nix_inl_inb_spi_range(nix, false, NULL, NULL);

		/* Get the inbound SA from NIX_RX_IPSEC_SA_BASE. CN9K is
		 * assumed to never have an inline device.
		 */
		inb_sa = (struct roc_onf_ipsec_inb_sa *)
			 roc_nix_inl_inb_sa_get(nix, false, ipsec->spi);
		if (!inb_sa) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa");
			rc = -EFAULT;
			goto mempool_put;
		}

		/* Check if SA is already in use */
		if (inb_sa->ctl.valid) {
			snprintf(tbuf, sizeof(tbuf),
				 "Inbound SA with SPI %u already in use",
				 ipsec->spi);
			rc = -EBUSY;
			goto mempool_put;
		}

		memset(inb_sa, 0, sizeof(struct roc_onf_ipsec_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_onf_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init inbound sa, rc=%d", rc);
			goto mempool_put;
		}

		inb_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;

		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		inb_priv->replay_win_sz = ipsec->replay_win_sz;
		if (inb_priv->replay_win_sz) {
			rc = ar_window_init(inb_priv);
			if (rc)
				goto mempool_put;
		}

		/* Prepare session priv */
		sess_priv.inb_sa = 1;
		sess_priv.sa_idx = ipsec->spi & spi_mask;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi & spi_mask;
		eth_sec->spi = ipsec->spi;
		eth_sec->inb = true;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess++;
	} else {
		struct cn9k_outb_priv_data *outb_priv;
		struct roc_onf_ipsec_outb_sa *outb_sa;
		uintptr_t sa_base = dev->outb.sa_base;
		struct cnxk_ipsec_outb_rlens *rlens;
		uint32_t sa_idx;

		PLT_STATIC_ASSERT(sizeof(struct cn9k_outb_priv_data) <
				  ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, 0);
		if (rc)
			goto mempool_put;

		outb_sa = roc_nix_inl_onf_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		memset(outb_sa, 0, sizeof(struct roc_onf_ipsec_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_onf_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outbound sa, rc=%d", rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;
		/* Start sequence number with 1 */
		outb_priv->seq = 1;

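		/* For AES-GCM the first 4 bytes of the SA nonce hold the
		 * salt; copy_salt is expected to make the Tx path replicate
		 * it into each packet's IV area.
		 */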
		memcpy(&outb_priv->nonce, outb_sa->nonce, 4);
		if (outb_sa->ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
			outb_priv->copy_salt = 1;

		/* Save rlen info */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess++;
	}

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	rte_spinlock_unlock(lock);

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx);
	/*
	 * Update fast path info in priv area: the packed sess_priv word is
	 * stored as the session private data pointer itself, so the fast
	 * path can recover sa_idx and the rlen fields without an extra
	 * dereference.
	 */
	set_sec_session_private_data(sess, (void *)sess_priv.u64);

	return 0;
mempool_put:
	rte_spinlock_unlock(lock);
	rte_mempool_put(mempool, eth_sec);
	if (rc)
		plt_err("%s", tbuf);
	return rc;
}

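/*
 * Destroy reverses the create path: the SA is disabled in place by
 * clearing its valid bit, an outbound SA index is returned to the
 * allocator, and the tracking object goes back to its mempool.
 */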
static int
cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_onf_ipsec_outb_sa *outb_sa;
	struct roc_onf_ipsec_inb_sa *inb_sa;
	struct cnxk_eth_sec_sess *eth_sec;
	struct rte_mempool *mp;
	rte_spinlock_t *lock;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	if (eth_sec->inb) {
		inb_sa = eth_sec->sa;
		/* Disable SA */
		inb_sa->ctl.valid = 0;

		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess--;
	} else {
		outb_sa = eth_sec->sa;
		/* Disable SA */
		outb_sa->ctl.valid = 0;

		/* Release Outbound SA index */
		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess--;
	}

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	rte_spinlock_unlock(lock);

	plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u",
		    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx);

	/* Put eth_sec object back to pool */
	mp = rte_mempool_from_obj(eth_sec);
	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(mp, eth_sec);
	return 0;
}

static const struct rte_security_capability *
cn9k_eth_sec_capabilities_get(void *device __rte_unused)
{
	return cn9k_eth_sec_capabilities;
}

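/*
 * Plug the CN9K-specific hooks into the common cnxk security ops
 * table. The init_once guard keeps this idempotent, so callers
 * (presumably the CN9K probe path) may invoke it more than once.
 */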
void
cn9k_eth_sec_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_sec_ops.session_create = cn9k_eth_sec_session_create;
	cnxk_eth_sec_ops.session_destroy = cn9k_eth_sec_session_destroy;
	cnxk_eth_sec_ops.capabilities_get = cn9k_eth_sec_capabilities_get;
}