/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

/*
 * Security Associations
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ipsec.h"
#include "esp.h"
#include "parser.h"

#define IPDEFTTL 64

struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;
};

struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t aad_len;
};

const struct supported_cipher_algo cipher_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_CIPHER_NULL,
		.iv_len = 0,
		.block_size = 4,
		.key_len = 0
	},
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	},
	{
		.keyword = "aes-256-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 32
	},
	{
		.keyword = "aes-128-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 8,
		.block_size = 16, /* XXX AESNI MB limitation, should be 4 */
		.key_len = 20
	},
	{
		.keyword = "3des-cbc",
		.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_len = 8,
		.block_size = 8,
		.key_len = 24
	}
};

const struct supported_auth_algo auth_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_AUTH_NULL,
		.digest_len = 0,
		.key_len = 0,
		.key_not_req = 1
	},
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	},
	{
		.keyword = "sha256-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.digest_len = 12,
		.key_len = 32
	}
};

const struct supported_aead_algo aead_algos[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8,
	}
};

struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES];
uint32_t nb_sa_out;

struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES];
uint32_t nb_sa_in;

static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		const struct supported_cipher_algo *algo =
			&cipher_algos[i];

		if (strcmp(cipher_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		const struct supported_auth_algo *algo =
			&auth_algos[i];

		if (strcmp(auth_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		const struct supported_aead_algo *algo =
			&aead_algos[i];

		if (strcmp(aead_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

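/*
 * Illustrative only (not part of the build): the find_match_* helpers
 * resolve a configuration keyword to the parameter set that the SA
 * parser copies into a rule. Given the tables above, a sketch of the
 * expected behaviour:
 *
 *	const struct supported_cipher_algo *c =
 *		find_match_cipher_algo("aes-128-cbc");
 *	// c->key_len == 16, c->iv_len == 16, c->block_size == 16
 *
 *	if (find_match_aead_algo("aes-256-gcm") == NULL)
 *		;	// only "aes-128-gcm" is present in aead_algos[]
 */
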
/** parse_key_string
 * Parse a key string of colon-separated hex bytes (x:x:x:...)
 * into the uint8_t *key buffer.
 * return:
 *	> 0: number of bytes parsed
 *	0:   failed
 */
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *pt_start = key_str, *pt_end = key_str;
	uint32_t nb_bytes = 0;

	while (pt_end != NULL) {
		char sub_str[3] = {0};

		pt_end = strchr(pt_start, ':');

		if (pt_end == NULL) {
			if (strlen(pt_start) > 2)
				return 0;
			strncpy(sub_str, pt_start, 2);
		} else {
			if (pt_end - pt_start > 2)
				return 0;

			strncpy(sub_str, pt_start, pt_end - pt_start);
			pt_start = pt_end + 1;
		}

		key[nb_bytes++] = strtol(sub_str, NULL, 16);
	}

	return nb_bytes;
}

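/*
 * Illustrative only: a key string from the SA configuration file is
 * parsed into the raw bytes that the crypto xforms consume.
 *
 *	uint8_t key[64];	// sized by the caller
 *	uint32_t len = parse_key_string("de:ad:be:ef", key);
 *	// len == 4, key[] == { 0xde, 0xad, 0xbe, 0xef }
 *	// a segment wider than two hex digits, e.g. "dead:beef",
 *	// makes the function return 0
 */
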
void
parse_sa_tokens(char **tokens, uint32_t n_tokens,
	struct parse_status *status)
{
	struct ipsec_sa *rule = NULL;
	uint32_t ti;	/* token index */
	uint32_t *ri;	/* rule index */
	uint32_t cipher_algo_p = 0;
	uint32_t auth_algo_p = 0;
	uint32_t aead_algo_p = 0;
	uint32_t src_p = 0;
	uint32_t dst_p = 0;
	uint32_t mode_p = 0;
	uint32_t type_p = 0;
	uint32_t portid_p = 0;

	if (strcmp(tokens[0], "in") == 0) {
		ri = &nb_sa_in;

		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
			"too many sa rules, abort insertion\n");
		if (status->status < 0)
			return;

		rule = &sa_in[*ri];
	} else {
		ri = &nb_sa_out;

		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
			"too many sa rules, abort insertion\n");
		if (status->status < 0)
			return;

		rule = &sa_out[*ri];
	}

	/* spi number */
	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
	if (status->status < 0)
		return;
	if (atoi(tokens[1]) == INVALID_SPI)
		return;
	rule->spi = atoi(tokens[1]);

	for (ti = 2; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "mode") == 0) {
			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "ipv4-tunnel") == 0)
				rule->flags = IP4_TUNNEL;
			else if (strcmp(tokens[ti], "ipv6-tunnel") == 0)
				rule->flags = IP6_TUNNEL;
			else if (strcmp(tokens[ti], "transport") == 0)
				rule->flags = TRANSPORT;
			else {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			mode_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "cipher_algo") == 0) {
			const struct supported_cipher_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_cipher_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			rule->cipher_algo = algo->algo;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;
			rule->cipher_key_len = algo->key_len;

			/* for NULL algorithm, no cipher key required */
			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				cipher_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"cipher_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
				algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
				rule->salt = (uint32_t)rte_rand();

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
				key_len -= 4;
				rule->cipher_key_len = key_len;
				memcpy(&rule->salt,
					&rule->cipher_key[key_len], 4);
			}

			cipher_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "auth_algo") == 0) {
			const struct supported_auth_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_auth_algo(tokens[ti]);
			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			rule->auth_algo = algo->algo;
			rule->auth_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;

			/* NULL algorithm and combined algos do not
			 * require auth key
			 */
			if (algo->key_not_req) {
				auth_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"auth_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->auth_key);
			APP_CHECK(key_len == rule->auth_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			auth_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "aead_algo") == 0) {
			const struct supported_aead_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_aead_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			rule->aead_algo = algo->algo;
			rule->cipher_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;
			rule->aad_len = algo->aad_len;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"aead_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			key_len -= 4;
			rule->cipher_key_len = key_len;
			memcpy(&rule->salt,
				&rule->cipher_key[key_len], 4);

			aead_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "src") == 0) {
			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (rule->flags == IP4_TUNNEL) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->src.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (rule->flags == IP6_TUNNEL) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->src.ip.ip6.ip6_b,
					ip.s6_addr, 16);
			} else if (rule->flags == TRANSPORT) {
				APP_CHECK(0, status, "unrecognized input "
					"\"%s\"", tokens[ti]);
				return;
			}

			src_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "dst") == 0) {
			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (rule->flags == IP4_TUNNEL) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->dst.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (rule->flags == IP6_TUNNEL) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
			} else if (rule->flags == TRANSPORT) {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			dst_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "type") == 0) {
			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
				rule->type =
					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
			else if (strcmp(tokens[ti],
					"inline-protocol-offload") == 0)
				rule->type =
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
			else if (strcmp(tokens[ti],
					"lookaside-protocol-offload") == 0)
				rule->type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
			else if (strcmp(tokens[ti], "no-offload") == 0)
				rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else {
				APP_CHECK(0, status, "Invalid input \"%s\"",
					tokens[ti]);
				return;
			}

			type_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "port_id") == 0) {
			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->portid = atoi(tokens[ti]);
			if (status->status < 0)
				return;
			portid_p = 1;
			continue;
		}

		/* unrecognized input */
		APP_CHECK(0, status, "unrecognized input \"%s\"",
			tokens[ti]);
		return;
	}

	if (aead_algo_p) {
		APP_CHECK(cipher_algo_p == 0, status,
			"AEAD used, no need for cipher options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 0, status,
			"AEAD used, no need for auth options");
		if (status->status < 0)
			return;
	} else {
		APP_CHECK(cipher_algo_p == 1, status,
			"missing cipher or AEAD options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 1, status,
			"missing auth or AEAD options");
		if (status->status < 0)
			return;
	}

	APP_CHECK(mode_p == 1, status, "missing mode option");
	if (status->status < 0)
		return;

	if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
		printf("Missing portid option, falling back to non-offload\n");

	if (!type_p || !portid_p) {
		rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
		rule->portid = -1;
	}

	*ri = *ri + 1;
}

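/*
 * Illustrative only: parse_sa_tokens() consumes one pre-tokenized SA rule
 * from the configuration file. The leading "sa" keyword is assumed to be
 * stripped by the configuration parser, so tokens[0] is "in" or "out".
 * A configuration line of the shape (addresses here are made up):
 *
 *	sa out 5 aead_algo aes-128-gcm
 *	    aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef
 *	    mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
 *
 * yields an outbound tunnel SA with SPI 5; the 20-byte aead_key is split
 * into a 16-byte AES key plus a 4-byte salt, matching aead_algos[0] above.
 */
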
static inline void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
	uint32_t i;
	uint8_t a, b, c, d;

	printf("\tspi_%s(%3u):", inbound ? "in" : "out", sa->spi);

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		if (cipher_algos[i].algo == sa->cipher_algo &&
				cipher_algos[i].key_len == sa->cipher_key_len) {
			printf("%s ", cipher_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		if (auth_algos[i].algo == sa->auth_algo) {
			printf("%s ", auth_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		if (aead_algos[i].algo == sa->aead_algo) {
			printf("%s ", aead_algos[i].keyword);
			break;
		}
	}

	printf("mode:");

	switch (sa->flags) {
	case IP4_TUNNEL:
		printf("IP4Tunnel ");
		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
		break;
	case IP6_TUNNEL:
		printf("IP6Tunnel ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
		}
		printf(" ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
		}
		break;
	case TRANSPORT:
		printf("Transport");
		break;
	}
	printf("\n");
}

struct sa_ctx {
	void *satbl; /* pointer to array of rte_ipsec_sa objects */
	struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
	union {
		struct {
			struct rte_crypto_sym_xform a;
			struct rte_crypto_sym_xform b;
		};
	} xf[IPSEC_SA_MAX_ENTRIES];
};

static struct sa_ctx *
sa_create(const char *name, int32_t socket_id)
{
	char s[PATH_MAX];
	struct sa_ctx *sa_ctx;
	uint32_t mz_size;
	const struct rte_memzone *mz;

	snprintf(s, sizeof(s), "%s_%u", name, socket_id);

	/* Create SA array table */
	printf("Creating SA context with %u maximum entries\n",
		IPSEC_SA_MAX_ENTRIES);

	mz_size = sizeof(struct sa_ctx);
	mz = rte_memzone_reserve(s, mz_size, socket_id,
			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("Failed to allocate SA DB memory\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	sa_ctx = (struct sa_ctx *)mz->addr;

	return sa_ctx;
}

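/*
 * Illustrative only: the per-socket SA database lives in a single named
 * memzone, so a lookup by name can recover it later if needed:
 *
 *	struct sa_ctx *ctx = sa_create("sa_in", 0);
 *	if (ctx == NULL)
 *		rte_exit(EXIT_FAILURE, "sa_create: %d\n", rte_errno);
 *	// the memzone is named "sa_in_0" and holds the whole struct
 *	// sa_ctx, including the sa[] and xf[] arrays
 */
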
static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(portid, &dev_info);

	if (inbound) {
		if ((dev_info.rx_offload_capa &
				DEV_RX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware RX IPSec offload is not supported\n");
			return -EINVAL;
		}

	} else { /* outbound */
		if ((dev_info.tx_offload_capa &
				DEV_TX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware TX IPSec offload is not supported\n");
			return -EINVAL;
		}
	}
	return 0;
}

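/*
 * Illustrative only: for non-AEAD SAs, sa_add_rules() below builds a
 * two-element rte_crypto_sym_xform chain per SA. The ordering follows
 * the ESP processing direction, roughly:
 *
 *	inbound:  xf.b = CIPHER/DECRYPT,  xf.a = AUTH/VERIFY,   a -> b
 *	outbound: xf.a = CIPHER/ENCRYPT,  xf.b = AUTH/GENERATE, a -> b
 *
 * i.e. inbound verifies the digest before decrypting, while outbound
 * encrypts before generating the digest. AEAD SAs need only a single
 * xform (xf.a) covering both operations.
 */
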
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound)
{
	struct ipsec_sa *sa;
	uint32_t i, idx;
	uint16_t iv_length, aad_length;

	/* for ESN, the upper 32 bits of the SQN also need to be part of AAD */
	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;

	for (i = 0; i < nb_entries; i++) {
		idx = SPI2IDX(entries[i].spi);
		sa = &sa_ctx->sa[idx];
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
					idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];
		sa->seq = 0;

		if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			if (check_eth_dev_caps(sa->portid, inbound))
				return -EINVAL;
		}

		sa->direction = (inbound == 1) ?
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS;

		switch (sa->flags) {
		case IP4_TUNNEL:
			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
		}

		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
			iv_length = 16;

			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
			sa_ctx->xf[idx].a.aead.key.length =
				sa->cipher_key_len;
			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
				RTE_CRYPTO_AEAD_OP_DECRYPT :
				RTE_CRYPTO_AEAD_OP_ENCRYPT;
			sa_ctx->xf[idx].a.next = NULL;
			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
			sa_ctx->xf[idx].a.aead.aad_length =
				sa->aad_len + aad_length;
			sa_ctx->xf[idx].a.aead.digest_length =
				sa->digest_len;

			sa->xforms = &sa_ctx->xf[idx].a;

			print_one_sa_rule(sa, inbound);
		} else {
			switch (sa->cipher_algo) {
			case RTE_CRYPTO_CIPHER_NULL:
			case RTE_CRYPTO_CIPHER_3DES_CBC:
			case RTE_CRYPTO_CIPHER_AES_CBC:
				iv_length = sa->iv_len;
				break;
			case RTE_CRYPTO_CIPHER_AES_CTR:
				iv_length = 16;
				break;
			default:
				RTE_LOG(ERR, IPSEC_ESP,
					"unsupported cipher algorithm %u\n",
					sa->cipher_algo);
				return -EINVAL;
			}

			if (inbound) {
				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].b.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].b.cipher.op =
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
				sa_ctx->xf[idx].b.next = NULL;
				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].a.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].a.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].a.auth.op =
					RTE_CRYPTO_AUTH_OP_VERIFY;
			} else { /* outbound */
				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].a.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].a.cipher.op =
					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
				sa_ctx->xf[idx].a.next = NULL;
				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].b.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].b.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].b.auth.op =
					RTE_CRYPTO_AUTH_OP_GENERATE;
			}

			sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
			sa_ctx->xf[idx].b.next = NULL;
			sa->xforms = &sa_ctx->xf[idx].a;

			print_one_sa_rule(sa, inbound);
		}
	}

	return 0;
}

static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 0);
}

static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 1);
}

/*
 * Helper function, fills parameters that are identical for all SAs.
 */
static void
fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
	const struct app_sa_prm *app_prm)
{
	memset(prm, 0, sizeof(*prm));

	prm->flags = app_prm->flags;
	prm->ipsec_xform.options.esn = app_prm->enable_esn;
	prm->replay_win_sz = app_prm->window_size;
}

/*
 * Helper function, tries to determine the next_proto for an SPI
 * by searching through the SP rules.
 */
static int
get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir)
{
	int32_t rc4, rc6;

	rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
	rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);

	if (rc4 >= 0) {
		if (rc6 >= 0) {
			RTE_LOG(ERR, IPSEC,
				"%s: SPI %u used simultaneously by "
				"IPv4(%d) and IPv6 (%d) SP rules\n",
				__func__, spi, rc4, rc6);
			return -EINVAL;
		} else
			return IPPROTO_IPIP;
	} else if (rc6 < 0) {
		RTE_LOG(ERR, IPSEC,
			"%s: SPI %u is not used by any SP rule\n",
			__func__, spi);
		return -EINVAL;
	} else
		return IPPROTO_IPV6;
}

static int
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
	const struct ipv4_hdr *v4, struct ipv6_hdr *v6)
{
	int32_t rc;

	/*
	 * Try to get the SPI next proto by searching for that SPI in the SPD.
	 * Probably not the optimal way, but there seems to be nothing
	 * better right now.
	 */
	rc = get_spi_proto(ss->spi, ss->direction);
	if (rc < 0)
		return rc;

	fill_ipsec_app_sa_prm(prm, &app_sa_prm);
	prm->userdata = (uintptr_t)ss;

	/* setup ipsec xform */
	prm->ipsec_xform.spi = ss->spi;
	prm->ipsec_xform.salt = ss->salt;
	prm->ipsec_xform.direction = ss->direction;
	prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm->ipsec_xform.mode = (ss->flags == TRANSPORT) ?
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

	if (ss->flags == IP4_TUNNEL) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		prm->tun.hdr_len = sizeof(*v4);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v4;
	} else if (ss->flags == IP6_TUNNEL) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
		prm->tun.hdr_len = sizeof(*v6);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v6;
	} else {
		/* transport mode */
		prm->trs.proto = rc;
	}

	/* setup crypto section */
	prm->crypto_xform = ss->xforms;
	return 0;
}

static void
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
	const struct ipsec_sa *lsa)
{
	ss->sa = sa;
	ss->type = lsa->type;

	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
		/* setup crypto section */
		ss->crypto.ses = lsa->crypto_session;
	} else {
		/* setup session action type */
		ss->security.ses = lsa->sec_session;
		ss->security.ctx = lsa->security_ctx;
		ss->security.ol_flags = lsa->ol_flags;
	}
}

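/*
 * Illustrative only: the librte_ipsec setup sequence used by the two
 * functions below is, in outline:
 *
 *	struct rte_ipsec_sa_prm prm;
 *	fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);	// describe the SA
 *	int32_t sz = rte_ipsec_sa_size(&prm);	// query required space
 *	struct rte_ipsec_sa *sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *	rte_ipsec_sa_init(sa, &prm, sz);	// build the runtime SA
 *	fill_ipsec_session(&lsa->ips, sa, lsa);	// bind it to a session
 *
 * Error handling is omitted here; see ipsec_sa_init() and
 * ipsec_satbl_init() for the checked versions.
 */
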
/*
 * Initialise the related rte_ipsec_sa object.
 */
static int
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
{
	int rc;
	struct rte_ipsec_sa_prm prm;
	struct ipv4_hdr v4 = {
		.version_ihl = IPVERSION << 4 |
			sizeof(v4) / IPV4_IHL_MULTIPLIER,
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_ESP,
		.src_addr = lsa->src.ip.ip4,
		.dst_addr = lsa->dst.ip.ip4,
	};
	struct ipv6_hdr v6 = {
		.vtc_flow = htonl(IP6_VERSION << 28),
		.proto = IPPROTO_ESP,
	};

	if (lsa->flags == IP6_TUNNEL) {
		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
	}

	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
	if (rc == 0)
		rc = rte_ipsec_sa_init(sa, &prm, sa_size);
	if (rc < 0)
		return rc;

	fill_ipsec_session(&lsa->ips, sa, lsa);
	return 0;
}

/*
 * Allocate space and initialise rte_ipsec_sa structures,
 * one per session.
 */
static int
ipsec_satbl_init(struct sa_ctx *ctx, const struct ipsec_sa *ent,
	uint32_t nb_ent, int32_t socket)
{
	int32_t rc, sz;
	uint32_t i, idx;
	size_t tsz;
	struct rte_ipsec_sa *sa;
	struct ipsec_sa *lsa;
	struct rte_ipsec_sa_prm prm;

	/* determine SA size */
	idx = SPI2IDX(ent[0].spi);
	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0) {
		RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
			"failed to determine SA size, error code: %d\n",
			__func__, ctx, nb_ent, socket, sz);
		return sz;
	}

	tsz = sz * nb_ent;

	ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
	if (ctx->satbl == NULL) {
		RTE_LOG(ERR, IPSEC,
			"%s(%p, %u, %d): failed to allocate %zu bytes\n",
			__func__, ctx, nb_ent, socket, tsz);
		return -ENOMEM;
	}

	rc = 0;
	for (i = 0; i != nb_ent && rc == 0; i++) {

		idx = SPI2IDX(ent[i].spi);

		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
		lsa = ctx->sa + idx;

		rc = ipsec_sa_init(lsa, sa, sz);
	}

	return rc;
}

void
sa_init(struct socket_ctx *ctx, int32_t socket_id)
{
	int32_t rc;
	const char *name;

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ctx->sa_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (nb_sa_in > 0) {
		name = "sa_in";
		ctx->sa_in = sa_create(name, socket_id);
		if (ctx->sa_in == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init inbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");

	if (nb_sa_out > 0) {
		name = "sa_out";
		ctx->sa_out = sa_create(name, socket_id);
		if (ctx->sa_out == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init outbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
			"specified\n");
}

int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(m);
	sa = priv->sa;
	if (sa != NULL)
		return (sa_ctx->sa[sa_idx].spi == sa->spi);

	RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
	return 0;
}

static inline void
single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
		struct ipsec_sa **sa_ret)
{
	struct esp_hdr *esp;
	struct ip *ip;
	uint32_t *src4_addr;
	uint8_t *src6_addr;
	struct ipsec_sa *sa;

	*sa_ret = NULL;

	ip = rte_pktmbuf_mtod(pkt, struct ip *);
	if (ip->ip_v == IPVERSION)
		esp = (struct esp_hdr *)(ip + 1);
	else
		esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1);

	if (esp->spi == INVALID_SPI)
		return;

	sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
	if (rte_be_to_cpu_32(esp->spi) != sa->spi)
		return;

	switch (sa->flags) {
	case IP4_TUNNEL:
		src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
		if ((ip->ip_v == IPVERSION) &&
				(sa->src.ip.ip4 == *src4_addr) &&
				(sa->dst.ip.ip4 == *(src4_addr + 1)))
			*sa_ret = sa;
		break;
	case IP6_TUNNEL:
		src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
		if ((ip->ip_v == IP6_VERSION) &&
				!memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
				!memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
			*sa_ret = sa;
		break;
	case TRANSPORT:
		*sa_ret = sa;
	}
}

void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
		struct ipsec_sa *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
}

void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
		struct ipsec_sa *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		sa[i] = &sa_ctx->sa[sa_idx[i]];
}

/*
 * Select HW offloads to be used.
 */
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
		uint64_t *tx_offloads)
{
	struct ipsec_sa *rule;
	uint32_t idx_sa;

	*rx_offloads = 0;
	*tx_offloads = 0;

	/* Check for inbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
		rule = &sa_in[idx_sa];
		if ((rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
	}

	/* Check for outbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
		rule = &sa_out[idx_sa];
		if ((rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
	}
	return 0;
}

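/*
 * Illustrative only: a typical caller feeds the result of
 * sa_check_offloads() into the port configuration, along the lines of:
 *
 *	uint64_t rx_offloads, tx_offloads;
 *	struct rte_eth_conf conf;	// application defaults
 *
 *	sa_check_offloads(port_id, &rx_offloads, &tx_offloads);
 *	conf.rxmode.offloads |= rx_offloads;
 *	conf.txmode.offloads |= tx_offloads;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * so that DEV_RX_OFFLOAD_SECURITY / DEV_TX_OFFLOAD_SECURITY are only
 * requested on ports that actually carry inline SAs.
 */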