1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2016 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Security Associations 36 */ 37 #include <sys/types.h> 38 #include <netinet/in.h> 39 #include <netinet/ip.h> 40 #include <netinet/ip6.h> 41 42 #include <rte_memzone.h> 43 #include <rte_crypto.h> 44 #include <rte_cryptodev.h> 45 #include <rte_byteorder.h> 46 #include <rte_errno.h> 47 #include <rte_ip.h> 48 #include <rte_random.h> 49 50 #include "ipsec.h" 51 #include "esp.h" 52 #include "parser.h" 53 54 struct supported_cipher_algo { 55 const char *keyword; 56 enum rte_crypto_cipher_algorithm algo; 57 uint16_t iv_len; 58 uint16_t block_size; 59 uint16_t key_len; 60 }; 61 62 struct supported_auth_algo { 63 const char *keyword; 64 enum rte_crypto_auth_algorithm algo; 65 uint16_t digest_len; 66 uint16_t key_len; 67 uint8_t aad_len; 68 uint8_t key_not_req; 69 }; 70 71 const struct supported_cipher_algo cipher_algos[] = { 72 { 73 .keyword = "null", 74 .algo = RTE_CRYPTO_CIPHER_NULL, 75 .iv_len = 0, 76 .block_size = 4, 77 .key_len = 0 78 }, 79 { 80 .keyword = "aes-128-cbc", 81 .algo = RTE_CRYPTO_CIPHER_AES_CBC, 82 .iv_len = 16, 83 .block_size = 16, 84 .key_len = 16 85 }, 86 { 87 .keyword = "aes-128-gcm", 88 .algo = RTE_CRYPTO_CIPHER_AES_GCM, 89 .iv_len = 8, 90 .block_size = 4, 91 .key_len = 20 92 }, 93 { 94 .keyword = "aes-128-ctr", 95 .algo = RTE_CRYPTO_CIPHER_AES_CTR, 96 .iv_len = 8, 97 .block_size = 16, /* XXX AESNI MB limition, should be 4 */ 98 .key_len = 20 99 } 100 }; 101 102 const struct supported_auth_algo auth_algos[] = { 103 { 104 .keyword = "null", 105 .algo = RTE_CRYPTO_AUTH_NULL, 106 .digest_len = 0, 107 .key_len = 0, 108 .key_not_req = 1 109 }, 110 { 111 .keyword = "sha1-hmac", 112 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, 113 .digest_len = 12, 114 .key_len = 20 115 }, 116 { 117 .keyword = "aes-128-gcm", 118 .algo = RTE_CRYPTO_AUTH_AES_GCM, 119 .digest_len = 16, 120 .aad_len = 8, 121 .key_not_req = 1 122 } 123 }; 124 125 struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES]; 126 uint32_t nb_sa_out; 127 128 struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES]; 129 uint32_t nb_sa_in; 130 131 static const struct supported_cipher_algo * 132 find_match_cipher_algo(const char *cipher_keyword) 133 { 134 size_t i; 135 136 for (i = 0; i < RTE_DIM(cipher_algos); i++) { 137 const struct supported_cipher_algo *algo = 138 &cipher_algos[i]; 139 140 if (strcmp(cipher_keyword, algo->keyword) == 0) 141 return algo; 142 } 143 144 return NULL; 145 } 146 147 static const struct supported_auth_algo * 148 find_match_auth_algo(const char *auth_keyword) 149 { 150 size_t i; 151 152 for (i = 0; i < RTE_DIM(auth_algos); i++) { 153 const struct supported_auth_algo *algo = 154 &auth_algos[i]; 155 156 if (strcmp(auth_keyword, algo->keyword) == 0) 157 return algo; 158 } 159 160 return NULL; 161 } 162 163 /** parse_key_string 164 * parse x:x:x:x.... hex number key string into uint8_t *key 165 * return: 166 * > 0: number of bytes parsed 167 * 0: failed 168 */ 169 static uint32_t 170 parse_key_string(const char *key_str, uint8_t *key) 171 { 172 const char *pt_start = key_str, *pt_end = key_str; 173 char sub_str[3]; 174 uint32_t nb_bytes = 0; 175 176 while (pt_end != NULL) { 177 pt_end = strchr(pt_start, ':'); 178 179 if (pt_end == NULL) 180 strncpy(sub_str, pt_start, strlen(pt_start)); 181 else { 182 if (pt_end - pt_start > 2) 183 return 0; 184 185 strncpy(sub_str, pt_start, pt_end - pt_start); 186 pt_start = pt_end + 1; 187 } 188 189 key[nb_bytes++] = strtol(sub_str, NULL, 16); 190 } 191 192 return nb_bytes; 193 } 194 195 void 196 parse_sa_tokens(char **tokens, uint32_t n_tokens, 197 struct parse_status *status) 198 { 199 struct ipsec_sa *rule = NULL; 200 uint32_t ti; /*token index*/ 201 uint32_t *ri /*rule index*/; 202 uint32_t cipher_algo_p = 0; 203 uint32_t auth_algo_p = 0; 204 uint32_t src_p = 0; 205 uint32_t dst_p = 0; 206 uint32_t mode_p = 0; 207 208 if (strcmp(tokens[0], "in") == 0) { 209 ri = &nb_sa_in; 210 211 APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status, 212 "too many sa rules, abort insertion\n"); 213 if (status->status < 0) 214 return; 215 216 rule = &sa_in[*ri]; 217 } else { 218 ri = &nb_sa_out; 219 220 APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status, 221 "too many sa rules, abort insertion\n"); 222 if (status->status < 0) 223 return; 224 225 rule = &sa_out[*ri]; 226 } 227 228 /* spi number */ 229 APP_CHECK_TOKEN_IS_NUM(tokens, 1, status); 230 if (status->status < 0) 231 return; 232 rule->spi = atoi(tokens[1]); 233 234 for (ti = 2; ti < n_tokens; ti++) { 235 if (strcmp(tokens[ti], "mode") == 0) { 236 APP_CHECK_PRESENCE(mode_p, tokens[ti], status); 237 if (status->status < 0) 238 return; 239 240 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 241 if (status->status < 0) 242 return; 243 244 if (strcmp(tokens[ti], "ipv4-tunnel") == 0) 245 rule->flags = IP4_TUNNEL; 246 else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) 247 rule->flags = IP6_TUNNEL; 248 else if (strcmp(tokens[ti], "transport") == 0) 249 rule->flags = TRANSPORT; 250 else { 251 APP_CHECK(0, status, "unrecognized " 252 "input \"%s\"", tokens[ti]); 253 return; 254 } 255 256 mode_p = 1; 257 continue; 258 } 259 260 if (strcmp(tokens[ti], "cipher_algo") == 0) { 261 const struct supported_cipher_algo *algo; 262 uint32_t key_len; 263 264 APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti], 265 status); 266 if (status->status < 0) 267 return; 268 269 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 270 if (status->status < 0) 271 return; 272 273 algo = find_match_cipher_algo(tokens[ti]); 274 275 APP_CHECK(algo != NULL, status, "unrecognized " 276 "input \"%s\"", tokens[ti]); 277 278 rule->cipher_algo = algo->algo; 279 rule->block_size = algo->block_size; 280 rule->iv_len = algo->iv_len; 281 rule->cipher_key_len = algo->key_len; 282 283 /* for NULL algorithm, no cipher key required */ 284 if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) { 285 cipher_algo_p = 1; 286 continue; 287 } 288 289 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 290 if (status->status < 0) 291 return; 292 293 APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0, 294 status, "unrecognized input \"%s\", " 295 "expect \"cipher_key\"", tokens[ti]); 296 if (status->status < 0) 297 return; 298 299 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 300 if (status->status < 0) 301 return; 302 303 key_len = parse_key_string(tokens[ti], 304 rule->cipher_key); 305 APP_CHECK(key_len == rule->cipher_key_len, status, 306 "unrecognized input \"%s\"", tokens[ti]); 307 if (status->status < 0) 308 return; 309 310 if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC) 311 rule->salt = (uint32_t)rte_rand(); 312 313 if ((algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) || 314 (algo->algo == RTE_CRYPTO_CIPHER_AES_GCM)) { 315 key_len -= 4; 316 rule->cipher_key_len = key_len; 317 memcpy(&rule->salt, 318 &rule->cipher_key[key_len], 4); 319 } 320 321 cipher_algo_p = 1; 322 continue; 323 } 324 325 if (strcmp(tokens[ti], "auth_algo") == 0) { 326 const struct supported_auth_algo *algo; 327 uint32_t key_len; 328 329 APP_CHECK_PRESENCE(auth_algo_p, tokens[ti], 330 status); 331 if (status->status < 0) 332 return; 333 334 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 335 if (status->status < 0) 336 return; 337 338 algo = find_match_auth_algo(tokens[ti]); 339 APP_CHECK(algo != NULL, status, "unrecognized " 340 "input \"%s\"", tokens[ti]); 341 342 rule->auth_algo = algo->algo; 343 rule->auth_key_len = algo->key_len; 344 rule->digest_len = algo->digest_len; 345 rule->aad_len = algo->key_len; 346 347 /* NULL algorithm and combined algos do not 348 * require auth key 349 */ 350 if (algo->key_not_req) { 351 auth_algo_p = 1; 352 continue; 353 } 354 355 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 356 if (status->status < 0) 357 return; 358 359 APP_CHECK(strcmp(tokens[ti], "auth_key") == 0, 360 status, "unrecognized input \"%s\", " 361 "expect \"auth_key\"", tokens[ti]); 362 if (status->status < 0) 363 return; 364 365 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 366 if (status->status < 0) 367 return; 368 369 key_len = parse_key_string(tokens[ti], 370 rule->auth_key); 371 APP_CHECK(key_len == rule->auth_key_len, status, 372 "unrecognized input \"%s\"", tokens[ti]); 373 if (status->status < 0) 374 return; 375 376 auth_algo_p = 1; 377 continue; 378 } 379 380 if (strcmp(tokens[ti], "src") == 0) { 381 APP_CHECK_PRESENCE(src_p, tokens[ti], status); 382 if (status->status < 0) 383 return; 384 385 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 386 if (status->status < 0) 387 return; 388 389 if (rule->flags == IP4_TUNNEL) { 390 struct in_addr ip; 391 392 APP_CHECK(parse_ipv4_addr(tokens[ti], 393 &ip, NULL) == 0, status, 394 "unrecognized input \"%s\", " 395 "expect valid ipv4 addr", 396 tokens[ti]); 397 if (status->status < 0) 398 return; 399 rule->src.ip.ip4 = rte_bswap32( 400 (uint32_t)ip.s_addr); 401 } else if (rule->flags == IP6_TUNNEL) { 402 struct in6_addr ip; 403 404 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, 405 NULL) == 0, status, 406 "unrecognized input \"%s\", " 407 "expect valid ipv6 addr", 408 tokens[ti]); 409 if (status->status < 0) 410 return; 411 memcpy(rule->src.ip.ip6.ip6_b, 412 ip.s6_addr, 16); 413 } else if (rule->flags == TRANSPORT) { 414 APP_CHECK(0, status, "unrecognized input " 415 "\"%s\"", tokens[ti]); 416 return; 417 } 418 419 src_p = 1; 420 continue; 421 } 422 423 if (strcmp(tokens[ti], "dst") == 0) { 424 APP_CHECK_PRESENCE(dst_p, tokens[ti], status); 425 if (status->status < 0) 426 return; 427 428 INCREMENT_TOKEN_INDEX(ti, n_tokens, status); 429 if (status->status < 0) 430 return; 431 432 if (rule->flags == IP4_TUNNEL) { 433 struct in_addr ip; 434 435 APP_CHECK(parse_ipv4_addr(tokens[ti], 436 &ip, NULL) == 0, status, 437 "unrecognized input \"%s\", " 438 "expect valid ipv4 addr", 439 tokens[ti]); 440 if (status->status < 0) 441 return; 442 rule->dst.ip.ip4 = rte_bswap32( 443 (uint32_t)ip.s_addr); 444 } else if (rule->flags == IP6_TUNNEL) { 445 struct in6_addr ip; 446 447 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, 448 NULL) == 0, status, 449 "unrecognized input \"%s\", " 450 "expect valid ipv6 addr", 451 tokens[ti]); 452 if (status->status < 0) 453 return; 454 memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16); 455 } else if (rule->flags == TRANSPORT) { 456 APP_CHECK(0, status, "unrecognized " 457 "input \"%s\"", tokens[ti]); 458 return; 459 } 460 461 dst_p = 1; 462 continue; 463 } 464 465 /* unrecognizeable input */ 466 APP_CHECK(0, status, "unrecognized input \"%s\"", 467 tokens[ti]); 468 return; 469 } 470 471 APP_CHECK(cipher_algo_p == 1, status, "missing cipher options"); 472 if (status->status < 0) 473 return; 474 475 APP_CHECK(auth_algo_p == 1, status, "missing auth options"); 476 if (status->status < 0) 477 return; 478 479 APP_CHECK(mode_p == 1, status, "missing mode option"); 480 if (status->status < 0) 481 return; 482 483 *ri = *ri + 1; 484 } 485 486 static inline void 487 print_one_sa_rule(const struct ipsec_sa *sa, int inbound) 488 { 489 uint32_t i; 490 uint8_t a, b, c, d; 491 492 printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi); 493 494 for (i = 0; i < RTE_DIM(cipher_algos); i++) { 495 if (cipher_algos[i].algo == sa->cipher_algo) { 496 printf("%s ", cipher_algos[i].keyword); 497 break; 498 } 499 } 500 501 for (i = 0; i < RTE_DIM(auth_algos); i++) { 502 if (auth_algos[i].algo == sa->auth_algo) { 503 printf("%s ", auth_algos[i].keyword); 504 break; 505 } 506 } 507 508 printf("mode:"); 509 510 switch (sa->flags) { 511 case IP4_TUNNEL: 512 printf("IP4Tunnel "); 513 uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d); 514 printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a); 515 uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d); 516 printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a); 517 break; 518 case IP6_TUNNEL: 519 printf("IP6Tunnel "); 520 for (i = 0; i < 16; i++) { 521 if (i % 2 && i != 15) 522 printf("%.2x:", sa->src.ip.ip6.ip6_b[i]); 523 else 524 printf("%.2x", sa->src.ip.ip6.ip6_b[i]); 525 } 526 printf(" "); 527 for (i = 0; i < 16; i++) { 528 if (i % 2 && i != 15) 529 printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]); 530 else 531 printf("%.2x", sa->dst.ip.ip6.ip6_b[i]); 532 } 533 break; 534 case TRANSPORT: 535 printf("Transport"); 536 break; 537 } 538 printf("\n"); 539 } 540 541 struct sa_ctx { 542 struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES]; 543 struct { 544 struct rte_crypto_sym_xform a; 545 struct rte_crypto_sym_xform b; 546 } xf[IPSEC_SA_MAX_ENTRIES]; 547 }; 548 549 static struct sa_ctx * 550 sa_create(const char *name, int32_t socket_id) 551 { 552 char s[PATH_MAX]; 553 struct sa_ctx *sa_ctx; 554 uint32_t mz_size; 555 const struct rte_memzone *mz; 556 557 snprintf(s, sizeof(s), "%s_%u", name, socket_id); 558 559 /* Create SA array table */ 560 printf("Creating SA context with %u maximum entries\n", 561 IPSEC_SA_MAX_ENTRIES); 562 563 mz_size = sizeof(struct sa_ctx); 564 mz = rte_memzone_reserve(s, mz_size, socket_id, 565 RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY); 566 if (mz == NULL) { 567 printf("Failed to allocate SA DB memory\n"); 568 rte_errno = -ENOMEM; 569 return NULL; 570 } 571 572 sa_ctx = (struct sa_ctx *)mz->addr; 573 574 return sa_ctx; 575 } 576 577 static int 578 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], 579 uint32_t nb_entries, uint32_t inbound) 580 { 581 struct ipsec_sa *sa; 582 uint32_t i, idx; 583 584 for (i = 0; i < nb_entries; i++) { 585 idx = SPI2IDX(entries[i].spi); 586 sa = &sa_ctx->sa[idx]; 587 if (sa->spi != 0) { 588 printf("Index %u already in use by SPI %u\n", 589 idx, sa->spi); 590 return -EINVAL; 591 } 592 *sa = entries[i]; 593 sa->seq = 0; 594 595 switch (sa->flags) { 596 case IP4_TUNNEL: 597 sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4); 598 sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4); 599 } 600 601 if (inbound) { 602 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER; 603 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo; 604 sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key; 605 sa_ctx->xf[idx].b.cipher.key.length = 606 sa->cipher_key_len; 607 sa_ctx->xf[idx].b.cipher.op = 608 RTE_CRYPTO_CIPHER_OP_DECRYPT; 609 sa_ctx->xf[idx].b.next = NULL; 610 611 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH; 612 sa_ctx->xf[idx].a.auth.algo = sa->auth_algo; 613 sa_ctx->xf[idx].a.auth.add_auth_data_length = 614 sa->aad_len; 615 sa_ctx->xf[idx].a.auth.key.data = sa->auth_key; 616 sa_ctx->xf[idx].a.auth.key.length = 617 sa->auth_key_len; 618 sa_ctx->xf[idx].a.auth.digest_length = 619 sa->digest_len; 620 sa_ctx->xf[idx].a.auth.op = 621 RTE_CRYPTO_AUTH_OP_VERIFY; 622 623 } else { /* outbound */ 624 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER; 625 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo; 626 sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key; 627 sa_ctx->xf[idx].a.cipher.key.length = 628 sa->cipher_key_len; 629 sa_ctx->xf[idx].a.cipher.op = 630 RTE_CRYPTO_CIPHER_OP_ENCRYPT; 631 sa_ctx->xf[idx].a.next = NULL; 632 633 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH; 634 sa_ctx->xf[idx].b.auth.algo = sa->auth_algo; 635 sa_ctx->xf[idx].b.auth.add_auth_data_length = 636 sa->aad_len; 637 sa_ctx->xf[idx].b.auth.key.data = sa->auth_key; 638 sa_ctx->xf[idx].b.auth.key.length = 639 sa->auth_key_len; 640 sa_ctx->xf[idx].b.auth.digest_length = 641 sa->digest_len; 642 sa_ctx->xf[idx].b.auth.op = 643 RTE_CRYPTO_AUTH_OP_GENERATE; 644 } 645 646 sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b; 647 sa_ctx->xf[idx].b.next = NULL; 648 sa->xforms = &sa_ctx->xf[idx].a; 649 650 print_one_sa_rule(sa, inbound); 651 } 652 653 return 0; 654 } 655 656 static inline int 657 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], 658 uint32_t nb_entries) 659 { 660 return sa_add_rules(sa_ctx, entries, nb_entries, 0); 661 } 662 663 static inline int 664 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], 665 uint32_t nb_entries) 666 { 667 return sa_add_rules(sa_ctx, entries, nb_entries, 1); 668 } 669 670 void 671 sa_init(struct socket_ctx *ctx, int32_t socket_id) 672 { 673 const char *name; 674 675 if (ctx == NULL) 676 rte_exit(EXIT_FAILURE, "NULL context.\n"); 677 678 if (ctx->sa_in != NULL) 679 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already " 680 "initialized\n", socket_id); 681 682 if (ctx->sa_out != NULL) 683 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already " 684 "initialized\n", socket_id); 685 686 if (nb_sa_in > 0) { 687 name = "sa_in"; 688 ctx->sa_in = sa_create(name, socket_id); 689 if (ctx->sa_in == NULL) 690 rte_exit(EXIT_FAILURE, "Error [%d] creating SA " 691 "context %s in socket %d\n", rte_errno, 692 name, socket_id); 693 694 sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in); 695 } else 696 RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n"); 697 698 if (nb_sa_out > 0) { 699 name = "sa_out"; 700 ctx->sa_out = sa_create(name, socket_id); 701 if (ctx->sa_out == NULL) 702 rte_exit(EXIT_FAILURE, "Error [%d] creating SA " 703 "context %s in socket %d\n", rte_errno, 704 name, socket_id); 705 706 sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out); 707 } else 708 RTE_LOG(WARNING, IPSEC, "No SA Outbound rule " 709 "specified\n"); 710 } 711 712 int 713 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx) 714 { 715 struct ipsec_mbuf_metadata *priv; 716 717 priv = RTE_PTR_ADD(m, sizeof(struct rte_mbuf)); 718 719 return (sa_ctx->sa[sa_idx].spi == priv->sa->spi); 720 } 721 722 static inline void 723 single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt, 724 struct ipsec_sa **sa_ret) 725 { 726 struct esp_hdr *esp; 727 struct ip *ip; 728 uint32_t *src4_addr; 729 uint8_t *src6_addr; 730 struct ipsec_sa *sa; 731 732 *sa_ret = NULL; 733 734 ip = rte_pktmbuf_mtod(pkt, struct ip *); 735 if (ip->ip_v == IPVERSION) 736 esp = (struct esp_hdr *)(ip + 1); 737 else 738 esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1); 739 740 if (esp->spi == INVALID_SPI) 741 return; 742 743 sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))]; 744 if (rte_be_to_cpu_32(esp->spi) != sa->spi) 745 return; 746 747 switch (sa->flags) { 748 case IP4_TUNNEL: 749 src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src)); 750 if ((ip->ip_v == IPVERSION) && 751 (sa->src.ip.ip4 == *src4_addr) && 752 (sa->dst.ip.ip4 == *(src4_addr + 1))) 753 *sa_ret = sa; 754 break; 755 case IP6_TUNNEL: 756 src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src)); 757 if ((ip->ip_v == IP6_VERSION) && 758 !memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) && 759 !memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16)) 760 *sa_ret = sa; 761 break; 762 case TRANSPORT: 763 *sa_ret = sa; 764 } 765 } 766 767 void 768 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[], 769 struct ipsec_sa *sa[], uint16_t nb_pkts) 770 { 771 uint32_t i; 772 773 for (i = 0; i < nb_pkts; i++) 774 single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]); 775 } 776 777 void 778 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[], 779 struct ipsec_sa *sa[], uint16_t nb_pkts) 780 { 781 uint32_t i; 782 783 for (i = 0; i < nb_pkts; i++) 784 sa[i] = &sa_ctx->sa[sa_idx[i]]; 785 } 786