xref: /f-stack/dpdk/examples/ipsec-secgw/sa.c (revision 2bfe3f2e)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Security Associations
36  */
37 #include <sys/types.h>
38 #include <netinet/in.h>
39 #include <netinet/ip.h>
40 #include <netinet/ip6.h>
41 
42 #include <rte_memzone.h>
43 #include <rte_crypto.h>
44 #include <rte_security.h>
45 #include <rte_cryptodev.h>
46 #include <rte_byteorder.h>
47 #include <rte_errno.h>
48 #include <rte_ip.h>
49 #include <rte_random.h>
50 #include <rte_ethdev.h>
51 
52 #include "ipsec.h"
53 #include "esp.h"
54 #include "parser.h"
55 
56 #define IPDEFTTL 64
57 
/* Descriptor for a cipher algorithm accepted in the SA config file. */
struct supported_cipher_algo {
	const char *keyword;	/* name used in the cfg file, e.g. "aes-128-cbc" */
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;	/* IV size carried in the ESP packet, in bytes */
	uint16_t block_size;	/* cipher block size used for ESP padding */
	uint16_t key_len;	/* expected key string length, in bytes */
};
65 
/* Descriptor for an authentication algorithm accepted in the SA config file. */
struct supported_auth_algo {
	const char *keyword;	/* name used in the cfg file, e.g. "sha1-hmac" */
	enum rte_crypto_auth_algorithm algo;
	uint16_t digest_len;	/* truncated ICV length, in bytes */
	uint16_t key_len;	/* expected key string length, in bytes */
	uint8_t key_not_req;	/* 1 if no auth_key option follows (e.g. "null") */
};
73 
/* Descriptor for a combined-mode (AEAD) algorithm accepted in the config file. */
struct supported_aead_algo {
	const char *keyword;	/* name used in the cfg file, e.g. "aes-128-gcm" */
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;	/* IV size carried in the ESP packet, in bytes */
	uint16_t block_size;	/* block size used for ESP padding */
	uint16_t digest_len;	/* ICV/tag length, in bytes */
	uint16_t key_len;	/* key string length incl. trailing salt bytes */
	uint8_t aad_len;	/* additional authenticated data length */
};
83 
84 
/* Cipher algorithms selectable via the "cipher_algo" SA option. */
const struct supported_cipher_algo cipher_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_CIPHER_NULL,
		.iv_len = 0,
		.block_size = 4,
		.key_len = 0
	},
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	},
	{
		.keyword = "aes-128-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 8,
		.block_size = 16, /* XXX AESNI MB limitation, should be 4 */
		/* 16-byte AES key + 4-byte nonce/salt; the salt is split
		 * off the end of the key string in parse_sa_tokens() */
		.key_len = 20
	}
};
108 
/* Authentication algorithms selectable via the "auth_algo" SA option. */
const struct supported_auth_algo auth_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_AUTH_NULL,
		.digest_len = 0,
		.key_len = 0,
		/* no "auth_key" token expected after this keyword */
		.key_not_req = 1
	},
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	},
	{
		.keyword = "sha256-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.digest_len = 12,
		.key_len = 32
	}
};
130 
/* AEAD algorithms selectable via the "aead_algo" SA option. */
const struct supported_aead_algo aead_algos[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		/* 16-byte key + 4-byte salt; the salt is split off the end
		 * of the key string in parse_sa_tokens() */
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8,
	}
};
142 
/* Outbound SA rules parsed from the config file, and their count. */
struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES];
uint32_t nb_sa_out;

/* Inbound SA rules parsed from the config file, and their count. */
struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES];
uint32_t nb_sa_in;
148 
149 static const struct supported_cipher_algo *
150 find_match_cipher_algo(const char *cipher_keyword)
151 {
152 	size_t i;
153 
154 	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
155 		const struct supported_cipher_algo *algo =
156 			&cipher_algos[i];
157 
158 		if (strcmp(cipher_keyword, algo->keyword) == 0)
159 			return algo;
160 	}
161 
162 	return NULL;
163 }
164 
165 static const struct supported_auth_algo *
166 find_match_auth_algo(const char *auth_keyword)
167 {
168 	size_t i;
169 
170 	for (i = 0; i < RTE_DIM(auth_algos); i++) {
171 		const struct supported_auth_algo *algo =
172 			&auth_algos[i];
173 
174 		if (strcmp(auth_keyword, algo->keyword) == 0)
175 			return algo;
176 	}
177 
178 	return NULL;
179 }
180 
181 static const struct supported_aead_algo *
182 find_match_aead_algo(const char *aead_keyword)
183 {
184 	size_t i;
185 
186 	for (i = 0; i < RTE_DIM(aead_algos); i++) {
187 		const struct supported_aead_algo *algo =
188 			&aead_algos[i];
189 
190 		if (strcmp(aead_keyword, algo->keyword) == 0)
191 			return algo;
192 	}
193 
194 	return NULL;
195 }
196 
/** parse_key_string
 *  parse x:x:x:x.... hex number key string into uint8_t *key
 *  Each ':'-separated component must be one or two hex digits; the
 *  caller's buffer must be large enough for the expected key length
 *  (callers compare the returned count against the algorithm's key_len).
 *  return:
 *  > 0: number of bytes parsed
 *  0:   failed (over-long, empty, or non-hex component)
 */
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *pt_start = key_str, *pt_end = key_str;
	uint32_t nb_bytes = 0;

	while (pt_end != NULL) {
		char sub_str[3] = {0};
		char *end;
		long byte;

		pt_end = strchr(pt_start, ':');

		if (pt_end == NULL) {
			if (strlen(pt_start) > 2)
				return 0;
			strncpy(sub_str, pt_start, 2);
		} else {
			if (pt_end - pt_start > 2)
				return 0;

			strncpy(sub_str, pt_start, pt_end - pt_start);
			pt_start = pt_end + 1;
		}

		/* Reject empty ("a::b") or non-hex components; the previous
		 * strtol(sub_str, NULL, 16) silently parsed garbage as 0.
		 */
		byte = strtol(sub_str, &end, 16);
		if (end == sub_str || *end != '\0')
			return 0;

		key[nb_bytes++] = (uint8_t)byte;
	}

	return nb_bytes;
}
231 
232 void
233 parse_sa_tokens(char **tokens, uint32_t n_tokens,
234 	struct parse_status *status)
235 {
236 	struct ipsec_sa *rule = NULL;
237 	uint32_t ti; /*token index*/
238 	uint32_t *ri /*rule index*/;
239 	uint32_t cipher_algo_p = 0;
240 	uint32_t auth_algo_p = 0;
241 	uint32_t aead_algo_p = 0;
242 	uint32_t src_p = 0;
243 	uint32_t dst_p = 0;
244 	uint32_t mode_p = 0;
245 	uint32_t type_p = 0;
246 	uint32_t portid_p = 0;
247 
248 	if (strcmp(tokens[0], "in") == 0) {
249 		ri = &nb_sa_in;
250 
251 		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
252 			"too many sa rules, abort insertion\n");
253 		if (status->status < 0)
254 			return;
255 
256 		rule = &sa_in[*ri];
257 	} else {
258 		ri = &nb_sa_out;
259 
260 		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
261 			"too many sa rules, abort insertion\n");
262 		if (status->status < 0)
263 			return;
264 
265 		rule = &sa_out[*ri];
266 	}
267 
268 	/* spi number */
269 	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
270 	if (status->status < 0)
271 		return;
272 	if (atoi(tokens[1]) == INVALID_SPI)
273 		return;
274 	rule->spi = atoi(tokens[1]);
275 
276 	for (ti = 2; ti < n_tokens; ti++) {
277 		if (strcmp(tokens[ti], "mode") == 0) {
278 			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
279 			if (status->status < 0)
280 				return;
281 
282 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
283 			if (status->status < 0)
284 				return;
285 
286 			if (strcmp(tokens[ti], "ipv4-tunnel") == 0)
287 				rule->flags = IP4_TUNNEL;
288 			else if (strcmp(tokens[ti], "ipv6-tunnel") == 0)
289 				rule->flags = IP6_TUNNEL;
290 			else if (strcmp(tokens[ti], "transport") == 0)
291 				rule->flags = TRANSPORT;
292 			else {
293 				APP_CHECK(0, status, "unrecognized "
294 					"input \"%s\"", tokens[ti]);
295 				return;
296 			}
297 
298 			mode_p = 1;
299 			continue;
300 		}
301 
302 		if (strcmp(tokens[ti], "cipher_algo") == 0) {
303 			const struct supported_cipher_algo *algo;
304 			uint32_t key_len;
305 
306 			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
307 				status);
308 			if (status->status < 0)
309 				return;
310 
311 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
312 			if (status->status < 0)
313 				return;
314 
315 			algo = find_match_cipher_algo(tokens[ti]);
316 
317 			APP_CHECK(algo != NULL, status, "unrecognized "
318 				"input \"%s\"", tokens[ti]);
319 
320 			rule->cipher_algo = algo->algo;
321 			rule->block_size = algo->block_size;
322 			rule->iv_len = algo->iv_len;
323 			rule->cipher_key_len = algo->key_len;
324 
325 			/* for NULL algorithm, no cipher key required */
326 			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
327 				cipher_algo_p = 1;
328 				continue;
329 			}
330 
331 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
332 			if (status->status < 0)
333 				return;
334 
335 			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
336 				status, "unrecognized input \"%s\", "
337 				"expect \"cipher_key\"", tokens[ti]);
338 			if (status->status < 0)
339 				return;
340 
341 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
342 			if (status->status < 0)
343 				return;
344 
345 			key_len = parse_key_string(tokens[ti],
346 				rule->cipher_key);
347 			APP_CHECK(key_len == rule->cipher_key_len, status,
348 				"unrecognized input \"%s\"", tokens[ti]);
349 			if (status->status < 0)
350 				return;
351 
352 			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC)
353 				rule->salt = (uint32_t)rte_rand();
354 
355 			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
356 				key_len -= 4;
357 				rule->cipher_key_len = key_len;
358 				memcpy(&rule->salt,
359 					&rule->cipher_key[key_len], 4);
360 			}
361 
362 			cipher_algo_p = 1;
363 			continue;
364 		}
365 
366 		if (strcmp(tokens[ti], "auth_algo") == 0) {
367 			const struct supported_auth_algo *algo;
368 			uint32_t key_len;
369 
370 			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
371 				status);
372 			if (status->status < 0)
373 				return;
374 
375 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
376 			if (status->status < 0)
377 				return;
378 
379 			algo = find_match_auth_algo(tokens[ti]);
380 			APP_CHECK(algo != NULL, status, "unrecognized "
381 				"input \"%s\"", tokens[ti]);
382 
383 			rule->auth_algo = algo->algo;
384 			rule->auth_key_len = algo->key_len;
385 			rule->digest_len = algo->digest_len;
386 
387 			/* NULL algorithm and combined algos do not
388 			 * require auth key
389 			 */
390 			if (algo->key_not_req) {
391 				auth_algo_p = 1;
392 				continue;
393 			}
394 
395 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
396 			if (status->status < 0)
397 				return;
398 
399 			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
400 				status, "unrecognized input \"%s\", "
401 				"expect \"auth_key\"", tokens[ti]);
402 			if (status->status < 0)
403 				return;
404 
405 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
406 			if (status->status < 0)
407 				return;
408 
409 			key_len = parse_key_string(tokens[ti],
410 				rule->auth_key);
411 			APP_CHECK(key_len == rule->auth_key_len, status,
412 				"unrecognized input \"%s\"", tokens[ti]);
413 			if (status->status < 0)
414 				return;
415 
416 			auth_algo_p = 1;
417 			continue;
418 		}
419 
420 		if (strcmp(tokens[ti], "aead_algo") == 0) {
421 			const struct supported_aead_algo *algo;
422 			uint32_t key_len;
423 
424 			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
425 				status);
426 			if (status->status < 0)
427 				return;
428 
429 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
430 			if (status->status < 0)
431 				return;
432 
433 			algo = find_match_aead_algo(tokens[ti]);
434 
435 			APP_CHECK(algo != NULL, status, "unrecognized "
436 				"input \"%s\"", tokens[ti]);
437 
438 			rule->aead_algo = algo->algo;
439 			rule->cipher_key_len = algo->key_len;
440 			rule->digest_len = algo->digest_len;
441 			rule->aad_len = algo->aad_len;
442 			rule->block_size = algo->block_size;
443 			rule->iv_len = algo->iv_len;
444 
445 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
446 			if (status->status < 0)
447 				return;
448 
449 			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
450 				status, "unrecognized input \"%s\", "
451 				"expect \"aead_key\"", tokens[ti]);
452 			if (status->status < 0)
453 				return;
454 
455 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
456 			if (status->status < 0)
457 				return;
458 
459 			key_len = parse_key_string(tokens[ti],
460 				rule->cipher_key);
461 			APP_CHECK(key_len == rule->cipher_key_len, status,
462 				"unrecognized input \"%s\"", tokens[ti]);
463 			if (status->status < 0)
464 				return;
465 
466 			key_len -= 4;
467 			rule->cipher_key_len = key_len;
468 			memcpy(&rule->salt,
469 				&rule->cipher_key[key_len], 4);
470 
471 			aead_algo_p = 1;
472 			continue;
473 		}
474 
475 		if (strcmp(tokens[ti], "src") == 0) {
476 			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
477 			if (status->status < 0)
478 				return;
479 
480 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
481 			if (status->status < 0)
482 				return;
483 
484 			if (rule->flags == IP4_TUNNEL) {
485 				struct in_addr ip;
486 
487 				APP_CHECK(parse_ipv4_addr(tokens[ti],
488 					&ip, NULL) == 0, status,
489 					"unrecognized input \"%s\", "
490 					"expect valid ipv4 addr",
491 					tokens[ti]);
492 				if (status->status < 0)
493 					return;
494 				rule->src.ip.ip4 = rte_bswap32(
495 					(uint32_t)ip.s_addr);
496 			} else if (rule->flags == IP6_TUNNEL) {
497 				struct in6_addr ip;
498 
499 				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
500 					NULL) == 0, status,
501 					"unrecognized input \"%s\", "
502 					"expect valid ipv6 addr",
503 					tokens[ti]);
504 				if (status->status < 0)
505 					return;
506 				memcpy(rule->src.ip.ip6.ip6_b,
507 					ip.s6_addr, 16);
508 			} else if (rule->flags == TRANSPORT) {
509 				APP_CHECK(0, status, "unrecognized input "
510 					"\"%s\"", tokens[ti]);
511 				return;
512 			}
513 
514 			src_p = 1;
515 			continue;
516 		}
517 
518 		if (strcmp(tokens[ti], "dst") == 0) {
519 			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
520 			if (status->status < 0)
521 				return;
522 
523 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
524 			if (status->status < 0)
525 				return;
526 
527 			if (rule->flags == IP4_TUNNEL) {
528 				struct in_addr ip;
529 
530 				APP_CHECK(parse_ipv4_addr(tokens[ti],
531 					&ip, NULL) == 0, status,
532 					"unrecognized input \"%s\", "
533 					"expect valid ipv4 addr",
534 					tokens[ti]);
535 				if (status->status < 0)
536 					return;
537 				rule->dst.ip.ip4 = rte_bswap32(
538 					(uint32_t)ip.s_addr);
539 			} else if (rule->flags == IP6_TUNNEL) {
540 				struct in6_addr ip;
541 
542 				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
543 					NULL) == 0, status,
544 					"unrecognized input \"%s\", "
545 					"expect valid ipv6 addr",
546 					tokens[ti]);
547 				if (status->status < 0)
548 					return;
549 				memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
550 			} else if (rule->flags == TRANSPORT) {
551 				APP_CHECK(0, status, "unrecognized "
552 					"input \"%s\"",	tokens[ti]);
553 				return;
554 			}
555 
556 			dst_p = 1;
557 			continue;
558 		}
559 
560 		if (strcmp(tokens[ti], "type") == 0) {
561 			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
562 			if (status->status < 0)
563 				return;
564 
565 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
566 			if (status->status < 0)
567 				return;
568 
569 			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
570 				rule->type =
571 					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
572 			else if (strcmp(tokens[ti],
573 					"inline-protocol-offload") == 0)
574 				rule->type =
575 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
576 			else if (strcmp(tokens[ti],
577 					"lookaside-protocol-offload") == 0)
578 				rule->type =
579 				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
580 			else if (strcmp(tokens[ti], "no-offload") == 0)
581 				rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
582 			else {
583 				APP_CHECK(0, status, "Invalid input \"%s\"",
584 						tokens[ti]);
585 				return;
586 			}
587 
588 			type_p = 1;
589 			continue;
590 		}
591 
592 		if (strcmp(tokens[ti], "port_id") == 0) {
593 			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
594 			if (status->status < 0)
595 				return;
596 			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
597 			if (status->status < 0)
598 				return;
599 			rule->portid = atoi(tokens[ti]);
600 			if (status->status < 0)
601 				return;
602 			portid_p = 1;
603 			continue;
604 		}
605 
606 		/* unrecognizeable input */
607 		APP_CHECK(0, status, "unrecognized input \"%s\"",
608 			tokens[ti]);
609 		return;
610 	}
611 
612 	if (aead_algo_p) {
613 		APP_CHECK(cipher_algo_p == 0, status,
614 				"AEAD used, no need for cipher options");
615 		if (status->status < 0)
616 			return;
617 
618 		APP_CHECK(auth_algo_p == 0, status,
619 				"AEAD used, no need for auth options");
620 		if (status->status < 0)
621 			return;
622 	} else {
623 		APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
624 		if (status->status < 0)
625 			return;
626 
627 		APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
628 		if (status->status < 0)
629 			return;
630 	}
631 
632 	APP_CHECK(mode_p == 1, status, "missing mode option");
633 	if (status->status < 0)
634 		return;
635 
636 	if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
637 		printf("Missing portid option, falling back to non-offload\n");
638 
639 	if (!type_p || !portid_p) {
640 		rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
641 		rule->portid = -1;
642 	}
643 
644 	*ri = *ri + 1;
645 }
646 
/*
 * Print a one-line human-readable summary of an SA: SPI, matched
 * algorithm keywords, mode and tunnel endpoints.
 */
static inline void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
	uint32_t i;
	uint8_t a, b, c, d;

	printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);

	/* map the numeric algo ids back to their config-file keywords */
	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		if (cipher_algos[i].algo == sa->cipher_algo) {
			printf("%s ", cipher_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		if (auth_algos[i].algo == sa->auth_algo) {
			printf("%s ", auth_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		if (aead_algos[i].algo == sa->aead_algo) {
			printf("%s ", aead_algos[i].keyword);
			break;
		}
	}

	printf("mode:");

	switch (sa->flags) {
	case IP4_TUNNEL:
		printf("IP4Tunnel ");
		/* octets printed d.c.b.a to undo the byte swap applied
		 * when the address was parsed/stored */
		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
		break;
	case IP6_TUNNEL:
		printf("IP6Tunnel ");
		/* 16 bytes, ':' after every second byte except the last */
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
		}
		printf(" ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
		}
		break;
	case TRANSPORT:
		printf("Transport");
		break;
	}
	printf("\n");
}
708 
/*
 * Runtime SA database: one entry per index (derived from the SPI) plus
 * up to two crypto transforms per SA — either a cipher+auth chain
 * (a -> b) or a single AEAD transform in 'a'.
 */
struct sa_ctx {
	struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
	union {
		struct {
			struct rte_crypto_sym_xform a;
			struct rte_crypto_sym_xform b;
		};
	} xf[IPSEC_SA_MAX_ENTRIES];
};
718 
719 static struct sa_ctx *
720 sa_create(const char *name, int32_t socket_id)
721 {
722 	char s[PATH_MAX];
723 	struct sa_ctx *sa_ctx;
724 	uint32_t mz_size;
725 	const struct rte_memzone *mz;
726 
727 	snprintf(s, sizeof(s), "%s_%u", name, socket_id);
728 
729 	/* Create SA array table */
730 	printf("Creating SA context with %u maximum entries\n",
731 			IPSEC_SA_MAX_ENTRIES);
732 
733 	mz_size = sizeof(struct sa_ctx);
734 	mz = rte_memzone_reserve(s, mz_size, socket_id,
735 			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
736 	if (mz == NULL) {
737 		printf("Failed to allocate SA DB memory\n");
738 		rte_errno = -ENOMEM;
739 		return NULL;
740 	}
741 
742 	sa_ctx = (struct sa_ctx *)mz->addr;
743 
744 	return sa_ctx;
745 }
746 
747 static int
748 check_eth_dev_caps(uint16_t portid, uint32_t inbound)
749 {
750 	struct rte_eth_dev_info dev_info;
751 
752 	rte_eth_dev_info_get(portid, &dev_info);
753 
754 	if (inbound) {
755 		if ((dev_info.rx_offload_capa &
756 				DEV_RX_OFFLOAD_SECURITY) == 0) {
757 			RTE_LOG(WARNING, PORT,
758 				"hardware RX IPSec offload is not supported\n");
759 			return -EINVAL;
760 		}
761 
762 	} else { /* outbound */
763 		if ((dev_info.tx_offload_capa &
764 				DEV_TX_OFFLOAD_SECURITY) == 0) {
765 			RTE_LOG(WARNING, PORT,
766 				"hardware TX IPSec offload is not supported\n");
767 			return -EINVAL;
768 		}
769 	}
770 	return 0;
771 }
772 
773 
/*
 * Install parsed SA entries into the runtime table and build the crypto
 * transform chain for each one.
 *
 * Each entry lands at index SPI2IDX(spi); a collision (slot already
 * holding a non-zero SPI) aborts with -EINVAL.  For AEAD SAs a single
 * transform 'a' is built; otherwise a two-element chain is built whose
 * order depends on direction (inbound: auth-verify then cipher-decrypt;
 * outbound: cipher-encrypt then auth-generate).
 *
 * Returns 0 on success, -EINVAL on collision, missing port capability,
 * or unsupported cipher algorithm.
 */
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound)
{
	struct ipsec_sa *sa;
	uint32_t i, idx;
	uint16_t iv_length;

	for (i = 0; i < nb_entries; i++) {
		idx = SPI2IDX(entries[i].spi);
		sa = &sa_ctx->sa[idx];
		/* SPI-derived index must be free (0 marks an empty slot) */
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
					idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];
		sa->seq = 0;

		/* inline offloads need hardware security support on the port */
		if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			if (check_eth_dev_caps(sa->portid, inbound))
				return -EINVAL;
		}

		sa->direction = (inbound == 1) ?
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS;

		/* tunnel endpoints were stored in host order at parse time;
		 * convert to network order for the datapath */
		switch (sa->flags) {
		case IP4_TUNNEL:
			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
		}

		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
			/* NOTE(review): 16 presumably matches the IV buffer
			 * layout built by the esp code (salt + IV + counter)
			 * — confirm against esp.c */
			iv_length = 16;

			/* single AEAD transform in slot 'a' */
			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
			sa_ctx->xf[idx].a.aead.key.length =
				sa->cipher_key_len;
			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
				RTE_CRYPTO_AEAD_OP_DECRYPT :
				RTE_CRYPTO_AEAD_OP_ENCRYPT;
			sa_ctx->xf[idx].a.next = NULL;
			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
			sa_ctx->xf[idx].a.aead.aad_length =
				sa->aad_len;
			sa_ctx->xf[idx].a.aead.digest_length =
				sa->digest_len;

			sa->xforms = &sa_ctx->xf[idx].a;

			print_one_sa_rule(sa, inbound);
		} else {
			switch (sa->cipher_algo) {
			case RTE_CRYPTO_CIPHER_NULL:
			case RTE_CRYPTO_CIPHER_AES_CBC:
				iv_length = sa->iv_len;
				break;
			case RTE_CRYPTO_CIPHER_AES_CTR:
				/* NOTE(review): full 16-byte counter block,
				 * not the 8-byte wire IV — confirm */
				iv_length = 16;
				break;
			default:
				RTE_LOG(ERR, IPSEC_ESP,
						"unsupported cipher algorithm %u\n",
						sa->cipher_algo);
				return -EINVAL;
			}

			if (inbound) {
				/* chain: a (auth verify) -> b (cipher decrypt) */
				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].b.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].b.cipher.op =
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
				sa_ctx->xf[idx].b.next = NULL;
				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].a.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].a.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].a.auth.op =
					RTE_CRYPTO_AUTH_OP_VERIFY;
			} else { /* outbound */
				/* chain: a (cipher encrypt) -> b (auth generate) */
				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].a.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].a.cipher.op =
					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
				sa_ctx->xf[idx].a.next = NULL;
				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].b.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].b.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].b.auth.op =
					RTE_CRYPTO_AUTH_OP_GENERATE;
			}

			/* link the two transforms in a -> b order */
			sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
			sa_ctx->xf[idx].b.next = NULL;
			sa->xforms = &sa_ctx->xf[idx].a;

			print_one_sa_rule(sa, inbound);
		}
	}

	return 0;
}
901 
/* Install outbound SA entries (inbound flag = 0 -> egress direction). */
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 0);
}
908 
/* Install inbound SA entries (inbound flag = 1 -> ingress direction). */
static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 1);
}
915 
/*
 * Create the per-socket inbound and outbound SA databases and populate
 * them from the globally parsed sa_in[]/sa_out[] rule arrays.
 *
 * Exits the application on NULL context, double initialization, or
 * allocation failure; merely warns when a direction has no rules.
 */
void
sa_init(struct socket_ctx *ctx, int32_t socket_id)
{
	const char *name;

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ctx->sa_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (nb_sa_in > 0) {
		name = "sa_in";
		ctx->sa_in = sa_create(name, socket_id);
		if (ctx->sa_in == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");

	if (nb_sa_out > 0) {
		name = "sa_out";
		ctx->sa_out = sa_create(name, socket_id);
		if (ctx->sa_out == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
			"specified\n");
}
957 
/*
 * Check that the SA recorded in the packet's private metadata (set
 * during IPsec processing) matches the SA at sa_idx that the security
 * policy lookup selected.  Returns non-zero when the SPIs agree.
 */
int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
	struct ipsec_mbuf_metadata *priv;

	/* per-packet metadata lives directly after the rte_mbuf header */
	priv = RTE_PTR_ADD(m, sizeof(struct rte_mbuf));

	return (sa_ctx->sa[sa_idx].spi == priv->sa->spi);
}
967 
/*
 * Find the SA for one inbound ESP packet: index the SA table by the
 * packet's SPI and accept the match only if the SPI — and, for tunnel
 * SAs, the outer addresses — agree.  *sa_ret is NULL when no SA matches.
 */
static inline void
single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
		struct ipsec_sa **sa_ret)
{
	struct esp_hdr *esp;
	struct ip *ip;
	uint32_t *src4_addr;
	uint8_t *src6_addr;
	struct ipsec_sa *sa;

	*sa_ret = NULL;

	/* locate the ESP header after the IPv4 or IPv6 header */
	ip = rte_pktmbuf_mtod(pkt, struct ip *);
	if (ip->ip_v == IPVERSION)
		esp = (struct esp_hdr *)(ip + 1);
	else
		esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1);

	if (esp->spi == INVALID_SPI)
		return;

	/* candidate slot; SPI2IDX may alias, so verify the full SPI */
	sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
	if (rte_be_to_cpu_32(esp->spi) != sa->spi)
		return;

	switch (sa->flags) {
	case IP4_TUNNEL:
		/* src4_addr points at ip_src; +1 reaches the adjacent
		 * ip_dst field */
		src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
		if ((ip->ip_v == IPVERSION) &&
				(sa->src.ip.ip4 == *src4_addr) &&
				(sa->dst.ip.ip4 == *(src4_addr + 1)))
			*sa_ret = sa;
		break;
	case IP6_TUNNEL:
		/* src6_addr points at ip6_src; +16 reaches ip6_dst */
		src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
		if ((ip->ip_v == IP6_VERSION) &&
				!memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
				!memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
			*sa_ret = sa;
		break;
	case TRANSPORT:
		/* transport mode: SPI match alone is sufficient */
		*sa_ret = sa;
	}
}
1012 
1013 void
1014 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
1015 		struct ipsec_sa *sa[], uint16_t nb_pkts)
1016 {
1017 	uint32_t i;
1018 
1019 	for (i = 0; i < nb_pkts; i++)
1020 		single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
1021 }
1022 
1023 void
1024 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
1025 		struct ipsec_sa *sa[], uint16_t nb_pkts)
1026 {
1027 	uint32_t i;
1028 
1029 	for (i = 0; i < nb_pkts; i++)
1030 		sa[i] = &sa_ctx->sa[sa_idx[i]];
1031 }
1032