1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2016-2017 Intel Corporation 3 */ 4 5 /* 6 * Security Associations 7 */ 8 #include <sys/types.h> 9 #include <netinet/in.h> 10 #include <netinet/ip.h> 11 #include <netinet/ip6.h> 12 13 #include <rte_memzone.h> 14 #include <rte_crypto.h> 15 #include <rte_security.h> 16 #include <rte_cryptodev.h> 17 #include <rte_byteorder.h> 18 #include <rte_errno.h> 19 #include <rte_ip.h> 20 #include <rte_random.h> 21 #include <rte_ethdev.h> 22 23 #include "ipsec.h" 24 #include "esp.h" 25 #include "parser.h" 26 27 #define IPDEFTTL 64 28 29 #define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT) 30 31 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT) 32 33 struct supported_cipher_algo { 34 const char *keyword; 35 enum rte_crypto_cipher_algorithm algo; 36 uint16_t iv_len; 37 uint16_t block_size; 38 uint16_t key_len; 39 }; 40 41 struct supported_auth_algo { 42 const char *keyword; 43 enum rte_crypto_auth_algorithm algo; 44 uint16_t digest_len; 45 uint16_t key_len; 46 uint8_t key_not_req; 47 }; 48 49 struct supported_aead_algo { 50 const char *keyword; 51 enum rte_crypto_aead_algorithm algo; 52 uint16_t iv_len; 53 uint16_t block_size; 54 uint16_t digest_len; 55 uint16_t key_len; 56 uint8_t aad_len; 57 }; 58 59 60 const struct supported_cipher_algo cipher_algos[] = { 61 { 62 .keyword = "null", 63 .algo = RTE_CRYPTO_CIPHER_NULL, 64 .iv_len = 0, 65 .block_size = 4, 66 .key_len = 0 67 }, 68 { 69 .keyword = "aes-128-cbc", 70 .algo = RTE_CRYPTO_CIPHER_AES_CBC, 71 .iv_len = 16, 72 .block_size = 16, 73 .key_len = 16 74 }, 75 { 76 .keyword = "aes-256-cbc", 77 .algo = RTE_CRYPTO_CIPHER_AES_CBC, 78 .iv_len = 16, 79 .block_size = 16, 80 .key_len = 32 81 }, 82 { 83 .keyword = "aes-128-ctr", 84 .algo = RTE_CRYPTO_CIPHER_AES_CTR, 85 .iv_len = 8, 86 .block_size = 4, 87 .key_len = 20 88 }, 89 { 90 .keyword = "3des-cbc", 91 .algo = RTE_CRYPTO_CIPHER_3DES_CBC, 92 .iv_len = 8, 93 .block_size = 8, 94 .key_len = 24 95 } 96 }; 97 98 const struct supported_auth_algo auth_algos[] = { 99 { 100 .keyword = "null", 101 .algo = RTE_CRYPTO_AUTH_NULL, 102 .digest_len = 0, 103 .key_len = 0, 104 .key_not_req = 1 105 }, 106 { 107 .keyword = "sha1-hmac", 108 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, 109 .digest_len = 12, 110 .key_len = 20 111 }, 112 { 113 .keyword = "sha256-hmac", 114 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, 115 .digest_len = 12, 116 .key_len = 32 117 } 118 }; 119 120 const struct supported_aead_algo aead_algos[] = { 121 { 122 .keyword = "aes-128-gcm", 123 .algo = RTE_CRYPTO_AEAD_AES_GCM, 124 .iv_len = 8, 125 .block_size = 4, 126 .key_len = 20, 127 .digest_len = 16, 128 .aad_len = 8, 129 } 130 }; 131 132 static struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES]; 133 static uint32_t nb_sa_out; 134 135 static struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES]; 136 static uint32_t nb_sa_in; 137 138 static const struct supported_cipher_algo * 139 find_match_cipher_algo(const char *cipher_keyword) 140 { 141 size_t i; 142 143 for (i = 0; i < RTE_DIM(cipher_algos); i++) { 144 const struct supported_cipher_algo *algo = 145 &cipher_algos[i]; 146 147 if (strcmp(cipher_keyword, algo->keyword) == 0) 148 return algo; 149 } 150 151 return NULL; 152 } 153 154 static const struct supported_auth_algo * 155 find_match_auth_algo(const char *auth_keyword) 156 { 157 size_t i; 158 159 for (i = 0; i < RTE_DIM(auth_algos); i++) { 160 const struct supported_auth_algo *algo = 161 &auth_algos[i]; 162 163 if (strcmp(auth_keyword, algo->keyword) == 0) 164 return algo; 165 } 166 167 return NULL; 168 } 169 170 static const struct supported_aead_algo * 171 find_match_aead_algo(const char *aead_keyword) 172 { 173 size_t i; 174 175 for (i = 0; i < RTE_DIM(aead_algos); i++) { 176 const struct supported_aead_algo *algo = 177 &aead_algos[i]; 178 179 if (strcmp(aead_keyword, algo->keyword) == 0) 180 return algo; 181 } 182 183 return NULL; 184 } 185 186 /** parse_key_string 187 * parse x:x:x:x.... hex number key string into uint8_t *key 188 * return: 189 * > 0: number of bytes parsed 190 * 0: failed 191 */ 192 static uint32_t 193 parse_key_string(const char *key_str, uint8_t *key) 194 { 195 const char *pt_start = key_str, *pt_end = key_str; 196 uint32_t nb_bytes = 0; 197 198 while (pt_end != NULL) { 199 char sub_str[3] = {0}; 200 201 pt_end = strchr(pt_start, ':'); 202 203 if (pt_end == NULL) { 204 if (strlen(pt_start) > 2) 205 return 0; 206 strncpy(sub_str, pt_start, 2); 207 } else { 208 if (pt_end - pt_start > 2) 209 return 0; 210 211 strncpy(sub_str, pt_start, pt_end - pt_start); 212 pt_start = pt_end + 1; 213 } 214 215 key[nb_bytes++] = strtol(sub_str, NULL, 16); 216 } 217 218 return nb_bytes; 219 } 220 221 void 222 parse_sa_tokens(char **tokens, uint32_t n_tokens, 223 struct parse_status *status) 224 { 225 struct ipsec_sa *rule = NULL; 226 uint32_t ti; /*token index*/ 227 uint32_t *ri /*rule index*/; 228 uint32_t cipher_algo_p = 0; 229 uint32_t auth_algo_p = 0; 230 uint32_t aead_algo_p = 0; 231 uint32_t src_p = 0; 232 uint32_t dst_p = 0; 233 uint32_t mode_p = 0; 234 uint32_t type_p = 0; 235 uint32_t portid_p = 0; 236 237 if (strcmp(tokens[0], "in") == 0) { 238 ri = &nb_sa_in; 239 240 APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status, 241 "too many sa rules, abort insertion\n"); 242 if (status->status < 0) 243 return; 244 245 rule = &sa_in[*ri]; 246 } else { 247 ri = &nb_sa_out; 248 249 APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status, 250 "too many sa rules, abort insertion\n"); 251 if (status->status < 0) 252 return; 253 254 rule = &sa_out[*ri]; 255 } 256 257 /* spi number */ 258 APP_CHECK_TOKEN_IS_NUM(tokens, 1, status); 259 if (status->status < 0) 260 return; 261 if (atoi(tokens[1]) == INVALID_SPI) 262 return; 263 rule->spi = atoi(tokens[1]); 264 265 for (ti = 2; ti < n_tokens; ti++) { 266 if (strcmp(tokens[ti], "mode") == 0) { 267 APP_CHECK_PRESENCE(mode_p, tokens[ti], status); 268 if (status->status < 0) 269 return; 270 271 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 272 if (status->status < 0) 273 return; 274 275 if (strcmp(tokens[ti], "ipv4-tunnel") == 0) 276 rule->flags = IP4_TUNNEL; 277 else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) 278 rule->flags = IP6_TUNNEL; 279 else if (strcmp(tokens[ti], "transport") == 0) 280 rule->flags = TRANSPORT; 281 else { 282 APP_CHECK(0, status, "unrecognized " 283 "input \"%s\"", tokens[ti]); 284 return; 285 } 286 287 mode_p = 1; 288 continue; 289 } 290 291 if (strcmp(tokens[ti], "cipher_algo") == 0) { 292 const struct supported_cipher_algo *algo; 293 uint32_t key_len; 294 295 APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti], 296 status); 297 if (status->status < 0) 298 return; 299 300 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 301 if (status->status < 0) 302 return; 303 304 algo = find_match_cipher_algo(tokens[ti]); 305 306 APP_CHECK(algo != NULL, status, "unrecognized " 307 "input \"%s\"", tokens[ti]); 308 309 rule->cipher_algo = algo->algo; 310 rule->block_size = algo->block_size; 311 rule->iv_len = algo->iv_len; 312 rule->cipher_key_len = algo->key_len; 313 314 /* for NULL algorithm, no cipher key required */ 315 if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) { 316 cipher_algo_p = 1; 317 continue; 318 } 319 320 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 321 if (status->status < 0) 322 return; 323 324 APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0, 325 status, "unrecognized input \"%s\", " 326 "expect \"cipher_key\"", tokens[ti]); 327 if (status->status < 0) 328 return; 329 330 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 331 if (status->status < 0) 332 return; 333 334 key_len = parse_key_string(tokens[ti], 335 rule->cipher_key); 336 APP_CHECK(key_len == rule->cipher_key_len, status, 337 "unrecognized input \"%s\"", tokens[ti]); 338 if (status->status < 0) 339 return; 340 341 if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC || 342 algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC) 343 rule->salt = (uint32_t)rte_rand(); 344 345 if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) { 346 key_len -= 4; 347 rule->cipher_key_len = key_len; 348 memcpy(&rule->salt, 349 &rule->cipher_key[key_len], 4); 350 } 351 352 cipher_algo_p = 1; 353 continue; 354 } 355 356 if (strcmp(tokens[ti], "auth_algo") == 0) { 357 const struct supported_auth_algo *algo; 358 uint32_t key_len; 359 360 APP_CHECK_PRESENCE(auth_algo_p, tokens[ti], 361 status); 362 if (status->status < 0) 363 return; 364 365 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 366 if (status->status < 0) 367 return; 368 369 algo = find_match_auth_algo(tokens[ti]); 370 APP_CHECK(algo != NULL, status, "unrecognized " 371 "input \"%s\"", tokens[ti]); 372 373 rule->auth_algo = algo->algo; 374 rule->auth_key_len = algo->key_len; 375 rule->digest_len = algo->digest_len; 376 377 /* NULL algorithm and combined algos do not 378 * require auth key 379 */ 380 if (algo->key_not_req) { 381 auth_algo_p = 1; 382 continue; 383 } 384 385 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 386 if (status->status < 0) 387 return; 388 389 APP_CHECK(strcmp(tokens[ti], "auth_key") == 0, 390 status, "unrecognized input \"%s\", " 391 "expect \"auth_key\"", tokens[ti]); 392 if (status->status < 0) 393 return; 394 395 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 396 if (status->status < 0) 397 return; 398 399 key_len = parse_key_string(tokens[ti], 400 rule->auth_key); 401 APP_CHECK(key_len == rule->auth_key_len, status, 402 "unrecognized input \"%s\"", tokens[ti]); 403 if (status->status < 0) 404 return; 405 406 auth_algo_p = 1; 407 continue; 408 } 409 410 if (strcmp(tokens[ti], "aead_algo") == 0) { 411 const struct supported_aead_algo *algo; 412 uint32_t key_len; 413 414 APP_CHECK_PRESENCE(aead_algo_p, tokens[ti], 415 status); 416 if (status->status < 0) 417 return; 418 419 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 420 if (status->status < 0) 421 return; 422 423 algo = find_match_aead_algo(tokens[ti]); 424 425 APP_CHECK(algo != NULL, status, "unrecognized " 426 "input \"%s\"", tokens[ti]); 427 428 rule->aead_algo = algo->algo; 429 rule->cipher_key_len = algo->key_len; 430 rule->digest_len = algo->digest_len; 431 rule->aad_len = algo->aad_len; 432 rule->block_size = algo->block_size; 433 rule->iv_len = algo->iv_len; 434 435 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 436 if (status->status < 0) 437 return; 438 439 APP_CHECK(strcmp(tokens[ti], "aead_key") == 0, 440 status, "unrecognized input \"%s\", " 441 "expect \"aead_key\"", tokens[ti]); 442 if (status->status < 0) 443 return; 444 445 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 446 if (status->status < 0) 447 return; 448 449 key_len = parse_key_string(tokens[ti], 450 rule->cipher_key); 451 APP_CHECK(key_len == rule->cipher_key_len, status, 452 "unrecognized input \"%s\"", tokens[ti]); 453 if (status->status < 0) 454 return; 455 456 key_len -= 4; 457 rule->cipher_key_len = key_len; 458 memcpy(&rule->salt, 459 &rule->cipher_key[key_len], 4); 460 461 aead_algo_p = 1; 462 continue; 463 } 464 465 if (strcmp(tokens[ti], "src") == 0) { 466 APP_CHECK_PRESENCE(src_p, tokens[ti], status); 467 if (status->status < 0) 468 return; 469 470 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 471 if (status->status < 0) 472 return; 473 474 if (IS_IP4_TUNNEL(rule->flags)) { 475 struct in_addr ip; 476 477 APP_CHECK(parse_ipv4_addr(tokens[ti], 478 &ip, NULL) == 0, status, 479 "unrecognized input \"%s\", " 480 "expect valid ipv4 addr", 481 tokens[ti]); 482 if (status->status < 0) 483 return; 484 rule->src.ip.ip4 = rte_bswap32( 485 (uint32_t)ip.s_addr); 486 } else if (IS_IP6_TUNNEL(rule->flags)) { 487 struct in6_addr ip; 488 489 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, 490 NULL) == 0, status, 491 "unrecognized input \"%s\", " 492 "expect valid ipv6 addr", 493 tokens[ti]); 494 if (status->status < 0) 495 return; 496 memcpy(rule->src.ip.ip6.ip6_b, 497 ip.s6_addr, 16); 498 } else if (IS_TRANSPORT(rule->flags)) { 499 APP_CHECK(0, status, "unrecognized input " 500 "\"%s\"", tokens[ti]); 501 return; 502 } 503 504 src_p = 1; 505 continue; 506 } 507 508 if (strcmp(tokens[ti], "dst") == 0) { 509 APP_CHECK_PRESENCE(dst_p, tokens[ti], status); 510 if (status->status < 0) 511 return; 512 513 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 514 if (status->status < 0) 515 return; 516 517 if (IS_IP4_TUNNEL(rule->flags)) { 518 struct in_addr ip; 519 520 APP_CHECK(parse_ipv4_addr(tokens[ti], 521 &ip, NULL) == 0, status, 522 "unrecognized input \"%s\", " 523 "expect valid ipv4 addr", 524 tokens[ti]); 525 if (status->status < 0) 526 return; 527 rule->dst.ip.ip4 = rte_bswap32( 528 (uint32_t)ip.s_addr); 529 } else if (IS_IP6_TUNNEL(rule->flags)) { 530 struct in6_addr ip; 531 532 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, 533 NULL) == 0, status, 534 "unrecognized input \"%s\", " 535 "expect valid ipv6 addr", 536 tokens[ti]); 537 if (status->status < 0) 538 return; 539 memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16); 540 } else if (IS_TRANSPORT(rule->flags)) { 541 APP_CHECK(0, status, "unrecognized " 542 "input \"%s\"", tokens[ti]); 543 return; 544 } 545 546 dst_p = 1; 547 continue; 548 } 549 550 if (strcmp(tokens[ti], "type") == 0) { 551 APP_CHECK_PRESENCE(type_p, tokens[ti], status); 552 if (status->status < 0) 553 return; 554 555 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 556 if (status->status < 0) 557 return; 558 559 if (strcmp(tokens[ti], "inline-crypto-offload") == 0) 560 rule->type = 561 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO; 562 else if (strcmp(tokens[ti], 563 "inline-protocol-offload") == 0) 564 rule->type = 565 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL; 566 else if (strcmp(tokens[ti], 567 "lookaside-protocol-offload") == 0) 568 rule->type = 569 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL; 570 else if (strcmp(tokens[ti], "no-offload") == 0) 571 rule->type = RTE_SECURITY_ACTION_TYPE_NONE; 572 else { 573 APP_CHECK(0, status, "Invalid input \"%s\"", 574 tokens[ti]); 575 return; 576 } 577 578 type_p = 1; 579 continue; 580 } 581 582 if (strcmp(tokens[ti], "port_id") == 0) { 583 APP_CHECK_PRESENCE(portid_p, tokens[ti], status); 584 if (status->status < 0) 585 return; 586 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 587 if (status->status < 0) 588 return; 589 rule->portid = atoi(tokens[ti]); 590 if (status->status < 0) 591 return; 592 portid_p = 1; 593 continue; 594 } 595 596 /* unrecognizeable input */ 597 APP_CHECK(0, status, "unrecognized input \"%s\"", 598 tokens[ti]); 599 return; 600 } 601 602 if (aead_algo_p) { 603 APP_CHECK(cipher_algo_p == 0, status, 604 "AEAD used, no need for cipher options"); 605 if (status->status < 0) 606 return; 607 608 APP_CHECK(auth_algo_p == 0, status, 609 "AEAD used, no need for auth options"); 610 if (status->status < 0) 611 return; 612 } else { 613 APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options"); 614 if (status->status < 0) 615 return; 616 617 APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options"); 618 if (status->status < 0) 619 return; 620 } 621 622 APP_CHECK(mode_p == 1, status, "missing mode option"); 623 if (status->status < 0) 624 return; 625 626 if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0)) 627 printf("Missing portid option, falling back to non-offload\n"); 628 629 if (!type_p || !portid_p) { 630 rule->type = RTE_SECURITY_ACTION_TYPE_NONE; 631 rule->portid = -1; 632 } 633 634 *ri = *ri + 1; 635 } 636 637 static void 638 print_one_sa_rule(const struct ipsec_sa *sa, int inbound) 639 { 640 uint32_t i; 641 uint8_t a, b, c, d; 642 643 printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi); 644 645 for (i = 0; i < RTE_DIM(cipher_algos); i++) { 646 if (cipher_algos[i].algo == sa->cipher_algo && 647 cipher_algos[i].key_len == sa->cipher_key_len) { 648 printf("%s ", cipher_algos[i].keyword); 649 break; 650 } 651 } 652 653 for (i = 0; i < RTE_DIM(auth_algos); i++) { 654 if (auth_algos[i].algo == sa->auth_algo) { 655 printf("%s ", auth_algos[i].keyword); 656 break; 657 } 658 } 659 660 for (i = 0; i < RTE_DIM(aead_algos); i++) { 661 if (aead_algos[i].algo == sa->aead_algo) { 662 printf("%s ", aead_algos[i].keyword); 663 break; 664 } 665 } 666 667 printf("mode:"); 668 669 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) { 670 case IP4_TUNNEL: 671 printf("IP4Tunnel "); 672 uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d); 673 printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a); 674 uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d); 675 printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a); 676 break; 677 case IP6_TUNNEL: 678 printf("IP6Tunnel "); 679 for (i = 0; i < 16; i++) { 680 if (i % 2 && i != 15) 681 printf("%.2x:", sa->src.ip.ip6.ip6_b[i]); 682 else 683 printf("%.2x", sa->src.ip.ip6.ip6_b[i]); 684 } 685 printf(" "); 686 for (i = 0; i < 16; i++) { 687 if (i % 2 && i != 15) 688 printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]); 689 else 690 printf("%.2x", sa->dst.ip.ip6.ip6_b[i]); 691 } 692 break; 693 case TRANSPORT: 694 printf("Transport "); 695 break; 696 } 697 printf(" type:"); 698 switch (sa->type) { 699 case RTE_SECURITY_ACTION_TYPE_NONE: 700 printf("no-offload "); 701 break; 702 case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO: 703 printf("inline-crypto-offload "); 704 break; 705 case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: 706 printf("inline-protocol-offload "); 707 break; 708 case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL: 709 printf("lookaside-protocol-offload "); 710 break; 711 } 712 printf("\n"); 713 } 714 715 struct sa_ctx { 716 struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES]; 717 union { 718 struct { 719 struct rte_crypto_sym_xform a; 720 struct rte_crypto_sym_xform b; 721 }; 722 } xf[IPSEC_SA_MAX_ENTRIES]; 723 }; 724 725 static struct sa_ctx * 726 sa_create(const char *name, int32_t socket_id) 727 { 728 char s[PATH_MAX]; 729 struct sa_ctx *sa_ctx; 730 uint32_t mz_size; 731 const struct rte_memzone *mz; 732 733 snprintf(s, sizeof(s), "%s_%u", name, socket_id); 734 735 /* Create SA array table */ 736 printf("Creating SA context with %u maximum entries on socket %d\n", 737 IPSEC_SA_MAX_ENTRIES, socket_id); 738 739 mz_size = sizeof(struct sa_ctx); 740 mz = rte_memzone_reserve(s, mz_size, socket_id, 741 RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY); 742 if (mz == NULL) { 743 printf("Failed to allocate SA DB memory\n"); 744 rte_errno = ENOMEM; 745 return NULL; 746 } 747 748 sa_ctx = (struct sa_ctx *)mz->addr; 749 750 return sa_ctx; 751 } 752 753 static int 754 check_eth_dev_caps(uint16_t portid, uint32_t inbound) 755 { 756 struct rte_eth_dev_info dev_info; 757 758 rte_eth_dev_info_get(portid, &dev_info); 759 760 if (inbound) { 761 if ((dev_info.rx_offload_capa & 762 DEV_RX_OFFLOAD_SECURITY) == 0) { 763 RTE_LOG(WARNING, PORT, 764 "hardware RX IPSec offload is not supported\n"); 765 return -EINVAL; 766 } 767 768 } else { /* outbound */ 769 if ((dev_info.tx_offload_capa & 770 DEV_TX_OFFLOAD_SECURITY) == 0) { 771 RTE_LOG(WARNING, PORT, 772 "hardware TX IPSec offload is not supported\n"); 773 return -EINVAL; 774 } 775 } 776 return 0; 777 } 778 779 /* 780 * Helper function, tries to determine next_proto for SPI 781 * by searching though SP rules. 782 */ 783 static int 784 get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir, 785 struct ip_addr ip_addr[2], uint32_t mask[2]) 786 { 787 int32_t rc4, rc6; 788 789 rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS, 790 ip_addr, mask); 791 rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS, 792 ip_addr, mask); 793 794 if (rc4 >= 0) { 795 if (rc6 >= 0) { 796 RTE_LOG(ERR, IPSEC, 797 "%s: SPI %u used simultaeously by " 798 "IPv4(%d) and IPv6 (%d) SP rules\n", 799 __func__, spi, rc4, rc6); 800 return -EINVAL; 801 } else 802 return IPPROTO_IPIP; 803 } else if (rc6 < 0) { 804 RTE_LOG(ERR, IPSEC, 805 "%s: SPI %u is not used by any SP rule\n", 806 __func__, spi); 807 return -EINVAL; 808 } else 809 return IPPROTO_IPV6; 810 } 811 812 /* 813 * Helper function for getting source and destination IP addresses 814 * from SP. Needed for inline crypto transport mode, as addresses are not 815 * provided in config file for that mode. It checks if SP for current SA exists, 816 * and based on what type of protocol is returned, it stores appropriate 817 * addresses got from SP into SA. 818 */ 819 static int 820 sa_add_address_inline_crypto(struct ipsec_sa *sa) 821 { 822 int protocol; 823 struct ip_addr ip_addr[2]; 824 uint32_t mask[2]; 825 826 protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask); 827 if (protocol < 0) 828 return protocol; 829 else if (protocol == IPPROTO_IPIP) { 830 sa->flags |= IP4_TRANSPORT; 831 if (mask[0] == IP4_FULL_MASK && 832 mask[1] == IP4_FULL_MASK && 833 ip_addr[0].ip.ip4 != 0 && 834 ip_addr[1].ip.ip4 != 0) { 835 836 sa->src.ip.ip4 = ip_addr[0].ip.ip4; 837 sa->dst.ip.ip4 = ip_addr[1].ip.ip4; 838 } else { 839 RTE_LOG(ERR, IPSEC, 840 "%s: No valid address or mask entry in" 841 " IPv4 SP rule for SPI %u\n", 842 __func__, sa->spi); 843 return -EINVAL; 844 } 845 } else if (protocol == IPPROTO_IPV6) { 846 sa->flags |= IP6_TRANSPORT; 847 if (mask[0] == IP6_FULL_MASK && 848 mask[1] == IP6_FULL_MASK && 849 (ip_addr[0].ip.ip6.ip6[0] != 0 || 850 ip_addr[0].ip.ip6.ip6[1] != 0) && 851 (ip_addr[1].ip.ip6.ip6[0] != 0 || 852 ip_addr[1].ip.ip6.ip6[1] != 0)) { 853 854 sa->src.ip.ip6 = ip_addr[0].ip.ip6; 855 sa->dst.ip.ip6 = ip_addr[1].ip.ip6; 856 } else { 857 RTE_LOG(ERR, IPSEC, 858 "%s: No valid address or mask entry in" 859 " IPv6 SP rule for SPI %u\n", 860 __func__, sa->spi); 861 return -EINVAL; 862 } 863 } 864 return 0; 865 } 866 867 static int 868 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], 869 uint32_t nb_entries, uint32_t inbound) 870 { 871 struct ipsec_sa *sa; 872 uint32_t i, idx; 873 uint16_t iv_length; 874 int inline_status; 875 876 for (i = 0; i < nb_entries; i++) { 877 idx = SPI2IDX(entries[i].spi); 878 sa = &sa_ctx->sa[idx]; 879 if (sa->spi != 0) { 880 printf("Index %u already in use by SPI %u\n", 881 idx, sa->spi); 882 return -EINVAL; 883 } 884 *sa = entries[i]; 885 sa->seq = 0; 886 887 if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL || 888 sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { 889 if (check_eth_dev_caps(sa->portid, inbound)) 890 return -EINVAL; 891 } 892 893 sa->direction = (inbound == 1) ? 894 RTE_SECURITY_IPSEC_SA_DIR_INGRESS : 895 RTE_SECURITY_IPSEC_SA_DIR_EGRESS; 896 897 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) { 898 case IP4_TUNNEL: 899 sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4); 900 sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4); 901 break; 902 case TRANSPORT: 903 if (sa->type == 904 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { 905 inline_status = 906 sa_add_address_inline_crypto(sa); 907 if (inline_status < 0) 908 return inline_status; 909 } 910 break; 911 } 912 913 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { 914 iv_length = 16; 915 916 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD; 917 sa_ctx->xf[idx].a.aead.algo = sa->aead_algo; 918 sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key; 919 sa_ctx->xf[idx].a.aead.key.length = 920 sa->cipher_key_len; 921 sa_ctx->xf[idx].a.aead.op = (inbound == 1) ? 922 RTE_CRYPTO_AEAD_OP_DECRYPT : 923 RTE_CRYPTO_AEAD_OP_ENCRYPT; 924 sa_ctx->xf[idx].a.next = NULL; 925 sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET; 926 sa_ctx->xf[idx].a.aead.iv.length = iv_length; 927 sa_ctx->xf[idx].a.aead.aad_length = 928 sa->aad_len; 929 sa_ctx->xf[idx].a.aead.digest_length = 930 sa->digest_len; 931 932 sa->xforms = &sa_ctx->xf[idx].a; 933 934 print_one_sa_rule(sa, inbound); 935 } else { 936 switch (sa->cipher_algo) { 937 case RTE_CRYPTO_CIPHER_NULL: 938 case RTE_CRYPTO_CIPHER_3DES_CBC: 939 case RTE_CRYPTO_CIPHER_AES_CBC: 940 iv_length = sa->iv_len; 941 break; 942 case RTE_CRYPTO_CIPHER_AES_CTR: 943 iv_length = 16; 944 break; 945 default: 946 RTE_LOG(ERR, IPSEC_ESP, 947 "unsupported cipher algorithm %u\n", 948 sa->cipher_algo); 949 return -EINVAL; 950 } 951 952 if (inbound) { 953 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER; 954 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo; 955 sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key; 956 sa_ctx->xf[idx].b.cipher.key.length = 957 sa->cipher_key_len; 958 sa_ctx->xf[idx].b.cipher.op = 959 RTE_CRYPTO_CIPHER_OP_DECRYPT; 960 sa_ctx->xf[idx].b.next = NULL; 961 sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET; 962 sa_ctx->xf[idx].b.cipher.iv.length = iv_length; 963 964 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH; 965 sa_ctx->xf[idx].a.auth.algo = sa->auth_algo; 966 sa_ctx->xf[idx].a.auth.key.data = sa->auth_key; 967 sa_ctx->xf[idx].a.auth.key.length = 968 sa->auth_key_len; 969 sa_ctx->xf[idx].a.auth.digest_length = 970 sa->digest_len; 971 sa_ctx->xf[idx].a.auth.op = 972 RTE_CRYPTO_AUTH_OP_VERIFY; 973 } else { /* outbound */ 974 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER; 975 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo; 976 sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key; 977 sa_ctx->xf[idx].a.cipher.key.length = 978 sa->cipher_key_len; 979 sa_ctx->xf[idx].a.cipher.op = 980 RTE_CRYPTO_CIPHER_OP_ENCRYPT; 981 sa_ctx->xf[idx].a.next = NULL; 982 sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET; 983 sa_ctx->xf[idx].a.cipher.iv.length = iv_length; 984 985 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH; 986 sa_ctx->xf[idx].b.auth.algo = sa->auth_algo; 987 sa_ctx->xf[idx].b.auth.key.data = sa->auth_key; 988 sa_ctx->xf[idx].b.auth.key.length = 989 sa->auth_key_len; 990 sa_ctx->xf[idx].b.auth.digest_length = 991 sa->digest_len; 992 sa_ctx->xf[idx].b.auth.op = 993 RTE_CRYPTO_AUTH_OP_GENERATE; 994 } 995 996 sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b; 997 sa_ctx->xf[idx].b.next = NULL; 998 sa->xforms = &sa_ctx->xf[idx].a; 999 1000 print_one_sa_rule(sa, inbound); 1001 } 1002 } 1003 1004 return 0; 1005 } 1006 1007 static inline int 1008 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], 1009 uint32_t nb_entries) 1010 { 1011 return sa_add_rules(sa_ctx, entries, nb_entries, 0); 1012 } 1013 1014 static inline int 1015 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], 1016 uint32_t nb_entries) 1017 { 1018 return sa_add_rules(sa_ctx, entries, nb_entries, 1); 1019 } 1020 1021 /* 1022 * Walk through all SA rules to find an SA with given SPI 1023 */ 1024 int 1025 sa_spi_present(uint32_t spi, int inbound) 1026 { 1027 uint32_t i, num; 1028 const struct ipsec_sa *sar; 1029 1030 if (inbound != 0) { 1031 sar = sa_in; 1032 num = nb_sa_in; 1033 } else { 1034 sar = sa_out; 1035 num = nb_sa_out; 1036 } 1037 1038 for (i = 0; i != num; i++) { 1039 if (sar[i].spi == spi) 1040 return i; 1041 } 1042 1043 return -ENOENT; 1044 } 1045 1046 void 1047 sa_init(struct socket_ctx *ctx, int32_t socket_id) 1048 { 1049 const char *name; 1050 1051 if (ctx == NULL) 1052 rte_exit(EXIT_FAILURE, "NULL context.\n"); 1053 1054 if (ctx->sa_in != NULL) 1055 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already " 1056 "initialized\n", socket_id); 1057 1058 if (ctx->sa_out != NULL) 1059 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already " 1060 "initialized\n", socket_id); 1061 1062 if (nb_sa_in > 0) { 1063 name = "sa_in"; 1064 ctx->sa_in = sa_create(name, socket_id); 1065 if (ctx->sa_in == NULL) 1066 rte_exit(EXIT_FAILURE, "Error [%d] creating SA " 1067 "context %s in socket %d\n", rte_errno, 1068 name, socket_id); 1069 1070 sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in); 1071 } else 1072 RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n"); 1073 1074 if (nb_sa_out > 0) { 1075 name = "sa_out"; 1076 ctx->sa_out = sa_create(name, socket_id); 1077 if (ctx->sa_out == NULL) 1078 rte_exit(EXIT_FAILURE, "Error [%d] creating SA " 1079 "context %s in socket %d\n", rte_errno, 1080 name, socket_id); 1081 1082 sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out); 1083 } else 1084 RTE_LOG(WARNING, IPSEC, "No SA Outbound rule " 1085 "specified\n"); 1086 } 1087 1088 int 1089 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx) 1090 { 1091 struct ipsec_mbuf_metadata *priv; 1092 struct ipsec_sa *sa; 1093 1094 priv = get_priv(m); 1095 sa = priv->sa; 1096 if (sa != NULL) 1097 return (sa_ctx->sa[sa_idx].spi == sa->spi); 1098 1099 RTE_LOG(ERR, IPSEC, "SA not saved in private data\n"); 1100 return 0; 1101 } 1102 1103 static inline void 1104 single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt, 1105 struct ipsec_sa **sa_ret) 1106 { 1107 struct esp_hdr *esp; 1108 struct ip *ip; 1109 uint32_t *src4_addr; 1110 uint8_t *src6_addr; 1111 struct ipsec_sa *sa; 1112 1113 *sa_ret = NULL; 1114 1115 ip = rte_pktmbuf_mtod(pkt, struct ip *); 1116 if (ip->ip_v == IPVERSION) 1117 esp = (struct esp_hdr *)(ip + 1); 1118 else 1119 esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1); 1120 1121 if (esp->spi == INVALID_SPI) 1122 return; 1123 1124 sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))]; 1125 if (rte_be_to_cpu_32(esp->spi) != sa->spi) 1126 return; 1127 1128 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) { 1129 case IP4_TUNNEL: 1130 src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src)); 1131 if ((ip->ip_v == IPVERSION) && 1132 (sa->src.ip.ip4 == *src4_addr) && 1133 (sa->dst.ip.ip4 == *(src4_addr + 1))) 1134 *sa_ret = sa; 1135 break; 1136 case IP6_TUNNEL: 1137 src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src)); 1138 if ((ip->ip_v == IP6_VERSION) && 1139 !memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) && 1140 !memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16)) 1141 *sa_ret = sa; 1142 break; 1143 case TRANSPORT: 1144 *sa_ret = sa; 1145 } 1146 } 1147 1148 void 1149 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[], 1150 struct ipsec_sa *sa[], uint16_t nb_pkts) 1151 { 1152 uint32_t i; 1153 1154 for (i = 0; i < nb_pkts; i++) 1155 single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]); 1156 } 1157 1158 void 1159 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[], 1160 struct ipsec_sa *sa[], uint16_t nb_pkts) 1161 { 1162 uint32_t i; 1163 1164 for (i = 0; i < nb_pkts; i++) 1165 sa[i] = &sa_ctx->sa[sa_idx[i]]; 1166 } 1167