/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

/*
 * Security Associations
 */
#include <stdlib.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ipsec.h"
#include "esp.h"
#include "parser.h"
#include "sad.h"

#define IPDEFTTL 64

#define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)

#define IP6_FULL_MASK RTE_IPV6_MAX_DEPTH

#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)

struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t iv_len;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;
};

struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t aad_len;
};

const struct supported_cipher_algo cipher_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_CIPHER_NULL,
		.iv_len = 0,
		.block_size = 4,
		.key_len = 0
	},
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	},
	{
		.keyword = "aes-192-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 24
	},
	{
		.keyword = "aes-256-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 32
	},
	{
		.keyword = "aes-128-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		/* Per packet IV length */
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20
	},
	{
		.keyword = "aes-192-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 28
	},
	{
		.keyword = "aes-256-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 36
	},
	{
		.keyword = "3des-cbc",
		.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_len = 8,
		.block_size = 8,
		.key_len = 24
	},
	{
		.keyword = "des-cbc",
		.algo = RTE_CRYPTO_CIPHER_DES_CBC,
		.iv_len = 8,
		.block_size = 8,
		.key_len = 8
	}
};
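
/*
 * Note on key lengths: for the aes-*-ctr, aes-gmac and AEAD entries the
 * configured key carries a trailing 4-byte nonce/salt, which the parser
 * strips off into rule->salt; e.g. aes-128-ctr is a 16-byte key plus a
 * 4-byte nonce, hence key_len = 20.
 */
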
const struct supported_auth_algo auth_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_AUTH_NULL,
		.digest_len = 0,
		.key_len = 0,
		.key_not_req = 1
	},
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	},
	{
		.keyword = "sha256-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.digest_len = 16,
		.key_len = 32
	},
	{
		.keyword = "sha384-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
		.digest_len = 24,
		.key_len = 48
	},
	{
		.keyword = "sha512-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
		.digest_len = 32,
		.key_len = 64
	},
	{
		.keyword = "aes-gmac",
		.algo = RTE_CRYPTO_AUTH_AES_GMAC,
		.iv_len = 8,
		.digest_len = 16,
		.key_len = 20
	},
	{
		.keyword = "aes-xcbc-mac-96",
		.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
		.digest_len = 12,
		.key_len = 16
	}
};

const struct supported_aead_algo aead_algos[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "aes-192-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 28,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "aes-256-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 36,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "aes-128-ccm",
		.algo = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "aes-192-ccm",
		.algo = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 28,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "aes-256-ccm",
		.algo = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 36,
		.digest_len = 16,
		.aad_len = 8,
	},
	{
		.keyword = "chacha20-poly1305",
		.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
		.iv_len = 12,
		.block_size = 64,
		.key_len = 36,
		.digest_len = 16,
		.aad_len = 8,
	}
};

#define SA_INIT_NB	128

static uint32_t nb_crypto_sessions;
struct ipsec_sa *sa_out;
uint32_t nb_sa_out;
static uint32_t sa_out_sz;
static struct ipsec_sa_cnt sa_out_cnt;

struct ipsec_sa *sa_in;
uint32_t nb_sa_in;
static uint32_t sa_in_sz;
static struct ipsec_sa_cnt sa_in_cnt;

static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		const struct supported_cipher_algo *algo =
			&cipher_algos[i];

		if (strcmp(cipher_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		const struct supported_auth_algo *algo =
			&auth_algos[i];

		if (strcmp(auth_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		const struct supported_aead_algo *algo =
			&aead_algos[i];

		if (strcmp(aead_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}
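
/*
 * Keys in the SA configuration are written as colon-separated hex bytes.
 * For example, parse_key_string("de:ad:be:ef", key) below fills key[0..3]
 * with {0xde, 0xad, 0xbe, 0xef} and returns 4; the caller is expected to
 * supply a destination buffer large enough for the rule's key length.
 */
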
/** parse_key_string
 *  parse x:x:x:x.... hex number key string into uint8_t *key
 *  return:
 *  > 0: number of bytes parsed
 *  0:   failed
 */
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *pt_start = key_str, *pt_end = key_str;
	uint32_t nb_bytes = 0;

	while (pt_end != NULL) {
		char sub_str[3] = {0};

		pt_end = strchr(pt_start, ':');

		if (pt_end == NULL) {
			if (strlen(pt_start) > 2)
				return 0;
			strncpy(sub_str, pt_start, 2);
		} else {
			if (pt_end - pt_start > 2)
				return 0;

			strncpy(sub_str, pt_start, pt_end - pt_start);
			pt_start = pt_end + 1;
		}

		key[nb_bytes++] = strtol(sub_str, NULL, 16);
	}

	return nb_bytes;
}

static int
extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
{
	if (*sa_tbl == NULL) {
		*sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
		if (*sa_tbl == NULL)
			return -1;
		*cur_sz = SA_INIT_NB;
		return 0;
	}

	if (cur_cnt >= *cur_sz) {
		struct ipsec_sa *tmp;

		/* use a temporary so the original table is not leaked
		 * if realloc() fails
		 */
		tmp = realloc(*sa_tbl,
			*cur_sz * sizeof(struct ipsec_sa) * 2);
		if (tmp == NULL)
			return -1;
		*sa_tbl = tmp;
		/* clean reallocated extra space */
		memset(&(*sa_tbl)[*cur_sz], 0,
			*cur_sz * sizeof(struct ipsec_sa));
		*cur_sz *= 2;
	}

	return 0;
}

void
parse_sa_tokens(char **tokens, uint32_t n_tokens,
	struct parse_status *status)
{
	struct ipsec_sa *rule = NULL;
	struct rte_ipsec_session *ips;
	uint32_t ti; /*token index*/
	uint32_t *ri /*rule index*/;
	struct ipsec_sa_cnt *sa_cnt;
	uint32_t cipher_algo_p = 0;
	uint32_t auth_algo_p = 0;
	uint32_t aead_algo_p = 0;
	uint32_t src_p = 0;
	uint32_t dst_p = 0;
	uint32_t mode_p = 0;
	uint32_t type_p = 0;
	uint32_t portid_p = 0;
	uint32_t fallback_p = 0;
	int16_t status_p = 0;
	uint16_t udp_encap_p = 0;

	if (strcmp(tokens[0], "in") == 0) {
		ri = &nb_sa_in;
		sa_cnt = &sa_in_cnt;
		if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
			return;
		rule = &sa_in[*ri];
		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	} else {
		ri = &nb_sa_out;
		sa_cnt = &sa_out_cnt;
		if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
			return;
		rule = &sa_out[*ri];
		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	}

	/* spi number */
	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
	if (status->status < 0)
		return;
	if (atoi(tokens[1]) == INVALID_SPI)
		return;
	rule->flags = 0;
	rule->spi = atoi(tokens[1]);
	rule->portid = UINT16_MAX;
	ips = ipsec_get_primary_session(rule);
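
	/*
	 * Parse the remaining "<keyword> [value]" tokens. An illustrative
	 * rule (one tokenized config line; tokens[0] is the direction,
	 * tokens[1] the SPI):
	 *
	 *   out 5 cipher_algo aes-128-cbc cipher_key de:ad:..:ef
	 *       auth_algo sha1-hmac auth_key de:ad:..:ef
	 *       mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
	 */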
	for (ti = 2; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "mode") == 0) {
			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
				sa_cnt->nb_v4++;
				rule->flags |= IP4_TUNNEL;
			} else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
				sa_cnt->nb_v6++;
				rule->flags |= IP6_TUNNEL;
			} else if (strcmp(tokens[ti], "transport") == 0) {
				sa_cnt->nb_v4++;
				sa_cnt->nb_v6++;
				rule->flags |= TRANSPORT;
			} else {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			mode_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "telemetry") == 0) {
			rule->flags |= SA_TELEMETRY_ENABLE;
			continue;
		}

		if (strcmp(tokens[ti], "cipher_algo") == 0) {
			const struct supported_cipher_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_cipher_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->cipher_algo = algo->algo;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;
			rule->cipher_key_len = algo->key_len;

			/* for NULL algorithm, no cipher key required */
			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				cipher_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"cipher_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
				algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
				rule->salt = (uint32_t)rte_rand();

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
				key_len -= 4;
				rule->cipher_key_len = key_len;
				memcpy(&rule->salt,
					&rule->cipher_key[key_len], 4);
			}

			cipher_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "auth_algo") == 0) {
			const struct supported_auth_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_auth_algo(tokens[ti]);
			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->auth_algo = algo->algo;
			rule->auth_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;

			/* NULL algorithm and combined algos do not
			 * require auth key
			 */
			if (algo->key_not_req) {
				auth_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"auth_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->auth_key);
			APP_CHECK(key_len == rule->auth_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				key_len -= 4;
				rule->auth_key_len = key_len;
				rule->iv_len = algo->iv_len;
				memcpy(&rule->salt,
					&rule->auth_key[key_len], 4);
			}

			auth_algo_p = 1;
			continue;
		}
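
		/*
		 * By the same convention as aes-ctr/aes-gmac above, the
		 * last 4 bytes of a configured AEAD key are the implicit
		 * salt carried in the SA, not part of the cipher key.
		 */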
		if (strcmp(tokens[ti], "aead_algo") == 0) {
			const struct supported_aead_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_aead_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->aead_algo = algo->algo;
			rule->cipher_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;
			rule->aad_len = algo->aad_len;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"aead_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			key_len -= 4;
			rule->cipher_key_len = key_len;
			memcpy(&rule->salt,
				&rule->cipher_key[key_len], 4);

			aead_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "src") == 0) {
			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->src.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct rte_ipv6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;

				rule->src.ip.ip6 = ip;
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized input "
					"\"%s\"", tokens[ti]);
				return;
			}

			src_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "dst") == 0) {
			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->dst.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct rte_ipv6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;

				rule->dst.ip.ip6 = ip;
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			dst_p = 1;
			continue;
		}
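
		/*
		 * Note: src/dst above apply to tunnel SAs only; transport
		 * mode SAs get their addresses from the matching SP rule
		 * later (see sa_add_address_inline_crypto()).
		 */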
		if (strcmp(tokens[ti], "type") == 0) {
			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
				ips->type =
					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
			else if (strcmp(tokens[ti],
					"inline-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
			else if (strcmp(tokens[ti],
					"lookaside-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
			else if (strcmp(tokens[ti], "no-offload") == 0)
				ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
				ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
			else {
				APP_CHECK(0, status, "Invalid input \"%s\"",
					tokens[ti]);
				return;
			}

			type_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "port_id") == 0) {
			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			if (rule->portid == UINT16_MAX)
				rule->portid = atoi(tokens[ti]);
			else if (rule->portid != atoi(tokens[ti])) {
				APP_CHECK(0, status,
					"portid %s not matching with already assigned portid %u",
					tokens[ti], rule->portid);
				return;
			}
			portid_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "mss") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->mss = atoi(tokens[ti]);
			if (status->status < 0)
				return;
			continue;
		}

		if (strcmp(tokens[ti], "reassembly_en") == 0) {
			rule->flags |= SA_REASSEMBLY_ENABLE;
			continue;
		}

		if (strcmp(tokens[ti], "esn") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->esn = atoll(tokens[ti]);
			if (status->status < 0)
				return;
			continue;
		}
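
		/*
		 * An optional software fallback session lets ingress packets
		 * that arrive without inline offload applied (see
		 * MBUF_NO_SEC_OFFLOAD in inbound_sa_lookup()) still be
		 * processed.
		 */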
		if (strcmp(tokens[ti], "fallback") == 0) {
			struct rte_ipsec_session *fb;

			APP_CHECK(app_sa_prm.enable, status, "Fallback session "
				"not allowed for legacy mode.");
			if (status->status < 0)
				return;
			APP_CHECK(ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
				"Fallback session allowed if primary session "
				"is of type inline-crypto-offload only.");
			if (status->status < 0)
				return;
			APP_CHECK(rule->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
				"Fallback session not allowed for egress "
				"rule");
			if (status->status < 0)
				return;
			APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			fb = ipsec_get_fallback_session(rule);
			if (strcmp(tokens[ti], "lookaside-none") == 0)
				fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
				fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
			else {
				APP_CHECK(0, status, "unrecognized fallback "
					"type %s.", tokens[ti]);
				return;
			}

			rule->fallback_sessions = 1;
			nb_crypto_sessions++;
			fallback_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "flow-direction") == 0) {
			switch (ips->type) {
			case RTE_SECURITY_ACTION_TYPE_NONE:
			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
				rule->fdir_flag = 1;
				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
				if (status->status < 0)
					return;
				if (rule->portid == UINT16_MAX)
					rule->portid = atoi(tokens[ti]);
				else if (rule->portid != atoi(tokens[ti])) {
					APP_CHECK(0, status,
						"portid %s not matching with already assigned portid %u",
						tokens[ti], rule->portid);
					return;
				}
				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
				if (status->status < 0)
					return;
				rule->fdir_qid = atoi(tokens[ti]);
				/* validating portid and queueid */
				status_p = check_flow_params(rule->portid,
						rule->fdir_qid);
				if (status_p < 0) {
					printf("port id %u / queue id %u is "
						"not valid\n", rule->portid,
						rule->fdir_qid);
				}
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			default:
				APP_CHECK(0, status,
					"flow director not supported for security session type %d",
					ips->type);
				return;
			}
			continue;
		}
		if (strcmp(tokens[ti], "udp-encap") == 0) {
			switch (ips->type) {
			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
				APP_CHECK_PRESENCE(udp_encap_p, tokens[ti],
						status);
				if (status->status < 0)
					return;

				rule->udp_encap = 1;
				app_sa_prm.udp_encap = 1;
				udp_encap_p = 1;
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
				rule->udp_encap = 1;
				rule->udp.sport = 0;
				rule->udp.dport = 4500;
				break;
			default:
				APP_CHECK(0, status,
					"UDP encapsulation not supported for "
					"security session type %d",
					ips->type);
				return;
			}
			continue;
		}

		/* unrecognizable input */
		APP_CHECK(0, status, "unrecognized input \"%s\"",
			tokens[ti]);
		return;
	}

	if (aead_algo_p) {
		APP_CHECK(cipher_algo_p == 0, status,
			"AEAD used, no need for cipher options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 0, status,
			"AEAD used, no need for auth options");
		if (status->status < 0)
			return;
	} else {
		APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
		if (status->status < 0)
			return;
	}

	APP_CHECK(mode_p == 1, status, "missing mode option");
	if (status->status < 0)
		return;

	if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
		printf("Missing portid option, falling back to non-offload\n");

	if (!type_p || (!portid_p && ips->type !=
			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
		ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
	}

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		wrkr_flags |= INL_CR_F;
	else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		wrkr_flags |= INL_PR_F;
	else if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
		wrkr_flags |= LA_PR_F;
	else
		wrkr_flags |= LA_ANY_F;

	nb_crypto_sessions++;
	*ri = *ri + 1;
}
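
/*
 * Print a one-line summary of an SA; illustrative output:
 *   spi_out(  5):aes-128-cbc sha1-hmac mode:IP4Tunnel 172.16.1.5 172.16.2.5 type:no-offload
 */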
static void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
	uint32_t i;
	uint8_t a, b, c, d;
	const struct rte_ipsec_session *ips;
	const struct rte_ipsec_session *fallback_ips;

	printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		if (cipher_algos[i].algo == sa->cipher_algo &&
				cipher_algos[i].key_len == sa->cipher_key_len) {
			printf("%s ", cipher_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		if (auth_algos[i].algo == sa->auth_algo) {
			printf("%s ", auth_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		if (aead_algos[i].algo == sa->aead_algo &&
				aead_algos[i].key_len-4 == sa->cipher_key_len) {
			printf("%s ", aead_algos[i].keyword);
			break;
		}
	}

	printf("mode:");
	if (sa->udp_encap)
		printf("UDP encapsulated ");

	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		printf("IP4Tunnel ");
		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
		break;
	case IP6_TUNNEL:
		printf("IP6Tunnel ");
		printf(RTE_IPV6_ADDR_FMT, RTE_IPV6_ADDR_SPLIT(&sa->src.ip.ip6));
		printf(" ");
		printf(RTE_IPV6_ADDR_FMT, RTE_IPV6_ADDR_SPLIT(&sa->dst.ip.ip6));
		break;
	case TRANSPORT:
		printf("Transport ");
		break;
	}

	ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
	printf(" type:");
	switch (ips->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		printf("no-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		printf("inline-crypto-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		printf("inline-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		printf("lookaside-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
		printf("cpu-crypto-accelerated ");
		break;
	}

	fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
	if (fallback_ips != NULL && sa->fallback_sessions > 0) {
		printf("inline fallback: ");
		switch (fallback_ips->type) {
		case RTE_SECURITY_ACTION_TYPE_NONE:
			printf("lookaside-none");
			break;
		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			printf("cpu-crypto-accelerated");
			break;
		default:
			printf("invalid");
			break;
		}
	}
	if (sa->fdir_flag == 1)
		printf("flow-direction port %d queue %d", sa->portid,
				sa->fdir_qid);

	printf("\n");
}

static struct sa_ctx *
sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
{
	char s[PATH_MAX];
	struct sa_ctx *sa_ctx;
	uint32_t mz_size;
	const struct rte_memzone *mz;

	snprintf(s, sizeof(s), "%s_%u", name, socket_id);

	/* Create SA context */
	printf("Creating SA context with %u maximum entries on socket %d\n",
			nb_sa, socket_id);

	mz_size = sizeof(struct ipsec_xf) * nb_sa;
	mz = rte_memzone_reserve(s, mz_size, socket_id,
			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("Failed to allocate SA XFORM memory\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
		sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);

	if (sa_ctx == NULL) {
		printf("Failed to allocate SA CTX memory\n");
		rte_errno = ENOMEM;
		rte_memzone_free(mz);
		return NULL;
	}

	sa_ctx->xf = (struct ipsec_xf *)mz->addr;
	sa_ctx->nb_sa = nb_sa;

	return sa_ctx;
}
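
/*
 * Check that the ethdev behind portid advertises the RX/TX security
 * (and, if an MSS is configured, TCP TSO) offload capabilities that an
 * inline SA requires.
 */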
static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound, uint32_t tso)
{
	struct rte_eth_dev_info dev_info;
	int retval;

	retval = rte_eth_dev_info_get(portid, &dev_info);
	if (retval != 0) {
		RTE_LOG(ERR, IPSEC,
			"Error during getting device (port %u) info: %s\n",
			portid, strerror(-retval));

		return retval;
	}

	if (inbound) {
		if ((dev_info.rx_offload_capa &
				RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, IPSEC,
				"hardware RX IPSec offload is not supported\n");
			return -EINVAL;
		}

	} else { /* outbound */
		if ((dev_info.tx_offload_capa &
				RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, IPSEC,
				"hardware TX IPSec offload is not supported\n");
			return -EINVAL;
		}
		if (tso && (dev_info.tx_offload_capa &
				RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
			RTE_LOG(WARNING, IPSEC,
				"hardware TCP TSO offload is not supported\n");
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * Helper function, tries to determine next_proto for SPI
 * by searching through SP rules.
 */
static int
get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
		struct ip_addr ip_addr[2], uint32_t mask[2])
{
	int32_t rc4, rc6;

	rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			ip_addr, mask);
	rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			ip_addr, mask);

	if (rc4 >= 0) {
		if (rc6 >= 0) {
			RTE_LOG(ERR, IPSEC,
				"%s: SPI %u used simultaneously by "
				"IPv4(%d) and IPv6 (%d) SP rules\n",
				__func__, spi, rc4, rc6);
			return -EINVAL;
		} else
			return IPPROTO_IPIP;
	} else if (rc6 < 0) {
		RTE_LOG(ERR, IPSEC,
			"%s: SPI %u is not used by any SP rule\n",
			__func__, spi);
		return -EINVAL;
	} else
		return IPPROTO_IPV6;
}
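
/*
 * Note: the IPPROTO_* value returned by get_spi_proto() above is also
 * reused as the tunnel/transport next-proto when filling
 * rte_ipsec_sa_prm (see fill_ipsec_sa_prm()).
 */
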
/*
 * Helper function for getting source and destination IP addresses
 * from the SP. Needed for inline crypto transport mode, as addresses are
 * not provided in the config file for that mode. It checks whether an SP
 * for the current SA exists and, based on the protocol type returned,
 * stores the appropriate addresses from the SP in the SA.
 */
static int
sa_add_address_inline_crypto(struct ipsec_sa *sa)
{
	int protocol;
	struct ip_addr ip_addr[2];
	uint32_t mask[2];

	protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
	if (protocol < 0)
		return protocol;
	else if (protocol == IPPROTO_IPIP) {
		sa->flags |= IP4_TRANSPORT;
		if (mask[0] == IP4_FULL_MASK &&
				mask[1] == IP4_FULL_MASK &&
				ip_addr[0].ip.ip4 != 0 &&
				ip_addr[1].ip.ip4 != 0) {

			sa->src.ip.ip4 = ip_addr[0].ip.ip4;
			sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
		} else {
			RTE_LOG(ERR, IPSEC,
				"%s: No valid address or mask entry in"
				" IPv4 SP rule for SPI %u\n",
				__func__, sa->spi);
			return -EINVAL;
		}
	} else if (protocol == IPPROTO_IPV6) {
		sa->flags |= IP6_TRANSPORT;
		if (mask[0] == IP6_FULL_MASK &&
				mask[1] == IP6_FULL_MASK &&
				!rte_ipv6_addr_is_unspec(&ip_addr[0].ip.ip6) &&
				!rte_ipv6_addr_is_unspec(&ip_addr[1].ip.ip6)) {

			sa->src.ip.ip6 = ip_addr[0].ip.ip6;
			sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
		} else {
			RTE_LOG(ERR, IPSEC,
				"%s: No valid address or mask entry in"
				" IPv6 SP rule for SPI %u\n",
				__func__, sa->spi);
			return -EINVAL;
		}
	}
	return 0;
}

static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound,
		struct socket_ctx *skt_ctx,
		struct ipsec_ctx *ips_ctx[],
		const struct eventmode_conf *em_conf)
{
	struct ipsec_sa *sa;
	uint32_t i, idx;
	uint16_t iv_length, aad_length;
	uint16_t auth_iv_length = 0;
	int inline_status;
	int32_t rc;
	struct rte_ipsec_session *ips;

	/* for ESN upper 32 bits of SQN also need to be part of AAD */
	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;

	for (i = 0; i < nb_entries; i++) {
		idx = i;
		sa = &sa_ctx->sa[idx];
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
					idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];

		if (inbound) {
			rc = ipsec_sad_add(&sa_ctx->sad, sa);
			if (rc != 0)
				return rc;
		}

		sa->seq = 0;
		ips = ipsec_get_primary_session(sa);

		if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			if (check_eth_dev_caps(sa->portid, inbound, sa->mss))
				return -EINVAL;
		}

		switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
		case IP4_TUNNEL:
			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
			break;
		case TRANSPORT:
			if (ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
				inline_status =
					sa_add_address_inline_crypto(sa);
				if (inline_status < 0)
					return inline_status;
			}
			break;
		}
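
		/*
		 * Set up the crypto transforms for this SA. AEAD algorithms
		 * use a single xform; all other combinations chain two:
		 * auth (verify) -> cipher (decrypt) for inbound and
		 * cipher (encrypt) -> auth (generate) for outbound.
		 */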
		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM ||
			sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM ||
			sa->aead_algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {

			if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
				iv_length = 11;
			else
				iv_length = 12;

			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
			sa_ctx->xf[idx].a.aead.key.length =
				sa->cipher_key_len;
			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
				RTE_CRYPTO_AEAD_OP_DECRYPT :
				RTE_CRYPTO_AEAD_OP_ENCRYPT;
			sa_ctx->xf[idx].a.next = NULL;
			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
			sa_ctx->xf[idx].a.aead.aad_length =
				sa->aad_len + aad_length;
			sa_ctx->xf[idx].a.aead.digest_length =
				sa->digest_len;

			sa->xforms = &sa_ctx->xf[idx].a;
		} else {
			switch (sa->cipher_algo) {
			case RTE_CRYPTO_CIPHER_NULL:
			case RTE_CRYPTO_CIPHER_DES_CBC:
			case RTE_CRYPTO_CIPHER_3DES_CBC:
			case RTE_CRYPTO_CIPHER_AES_CBC:
				iv_length = sa->iv_len;
				break;
			case RTE_CRYPTO_CIPHER_AES_CTR:
				/* Length includes 8B per packet IV, 4B nonce and
				 * 4B counter as populated in datapath.
				 */
				iv_length = 16;
				break;
			default:
				RTE_LOG(ERR, IPSEC_ESP,
						"unsupported cipher algorithm %u\n",
						sa->cipher_algo);
				return -EINVAL;
			}

			/* AES_GMAC uses salt like AEAD algorithms */
			if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
				auth_iv_length = 12;

			if (inbound) {
				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].b.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].b.cipher.op =
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
				sa_ctx->xf[idx].b.next = NULL;
				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].a.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].a.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].a.auth.op =
					RTE_CRYPTO_AUTH_OP_VERIFY;
				sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.auth.iv.length = auth_iv_length;

			} else { /* outbound */
				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].a.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].a.cipher.op =
					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
				sa_ctx->xf[idx].a.next = NULL;
				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].b.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].b.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].b.auth.op =
					RTE_CRYPTO_AUTH_OP_GENERATE;
				sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.auth.iv.length = auth_iv_length;

			}

			if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				sa->xforms = inbound ?
					&sa_ctx->xf[idx].a : &sa_ctx->xf[idx].b;
				sa->xforms->next = NULL;

			} else {
				sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
				sa_ctx->xf[idx].b.next = NULL;
				sa->xforms = &sa_ctx->xf[idx].a;
			}
		}
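
		/*
		 * Instantiate the security/crypto session: inline session
		 * types are bound to the ethdev, everything else goes
		 * through a cryptodev (lookaside).
		 */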
		if (ips->type ==
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			ips->type ==
			RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			rc = create_inline_session(skt_ctx, sa, ips);
			if (rc != 0) {
				RTE_LOG(ERR, IPSEC_ESP,
					"create_inline_session() failed\n");
				return -EINVAL;
			}
		} else {
			rc = create_lookaside_session(ips_ctx, skt_ctx,
					em_conf, sa, ips);
			if (rc != 0) {
				RTE_LOG(ERR, IPSEC_ESP,
					"create_lookaside_session() failed\n");
				return -EINVAL;
			}
		}

		if (sa->fdir_flag && inbound) {
			rc = create_ipsec_esp_flow(sa);
			if (rc != 0)
				RTE_LOG(ERR, IPSEC_ESP,
					"create_ipsec_esp_flow() failed\n");
		}
		print_one_sa_rule(sa, inbound);
	}

	return 0;
}

static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx,
		struct ipsec_ctx *ips_ctx[],
		const struct eventmode_conf *em_conf)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx, em_conf);
}

static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx,
		struct ipsec_ctx *ips_ctx[],
		const struct eventmode_conf *em_conf)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx, em_conf);
}

/*
 * helper function, fills parameters that are identical for all SAs
 */
static void
fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
	const struct app_sa_prm *app_prm)
{
	memset(prm, 0, sizeof(*prm));

	prm->flags = app_prm->flags;
	prm->ipsec_xform.options.esn = app_prm->enable_esn;
	prm->ipsec_xform.replay_win_sz = app_prm->window_size;
}
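
/*
 * Fill rte_ipsec_sa_prm for one SA: the IPsec xform (SPI, salt, mode,
 * direction, UDP encapsulation, ESN), the tunnel header template and
 * the crypto xform chain.
 */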
static int
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
	const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
{
	int32_t rc;

	/*
	 * Try to get the next proto for the SPI by searching for that SPI
	 * in the SPD. Probably not the optimal way, but there seems to be
	 * nothing better right now.
	 */
	rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
	if (rc < 0)
		return rc;

	fill_ipsec_app_sa_prm(prm, &app_sa_prm);
	prm->userdata = (uintptr_t)ss;

	/* setup ipsec xform */
	prm->ipsec_xform.spi = ss->spi;
	prm->ipsec_xform.salt = ss->salt;
	prm->ipsec_xform.direction = ss->direction;
	prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	prm->ipsec_xform.options.udp_encap = ss->udp_encap;
	prm->ipsec_xform.udp.dport = ss->udp.dport;
	prm->ipsec_xform.udp.sport = ss->udp.sport;
	prm->ipsec_xform.options.ecn = 1;
	prm->ipsec_xform.options.copy_dscp = 1;

	if (ss->esn > 0) {
		prm->ipsec_xform.options.esn = 1;
		prm->ipsec_xform.esn.value = ss->esn;
	}

	if (IS_IP4_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		prm->tun.hdr_len = sizeof(*v4);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v4;
	} else if (IS_IP6_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
		prm->tun.hdr_len = sizeof(*v6);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v6;
	} else {
		/* transport mode */
		prm->trs.proto = rc;
	}

	/* setup crypto section */
	prm->crypto_xform = ss->xforms;
	return 0;
}

static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
{
	int32_t rc = 0;

	ss->sa = sa;

	rc = rte_ipsec_session_prepare(ss);
	if (rc != 0)
		memset(ss, 0, sizeof(*ss));

	return rc;
}

/*
 * Initialise related rte_ipsec_sa object.
 */
static int
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[],
		const struct eventmode_conf *em_conf)
{
	int rc;
	struct rte_ipsec_sa_prm prm;
	struct rte_ipsec_session *ips;
	struct rte_ipv4_hdr v4 = {
		.version_ihl = IPVERSION << 4 |
			sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
		.time_to_live = IPDEFTTL,
		.next_proto_id = lsa->udp_encap ? IPPROTO_UDP : IPPROTO_ESP,
		.src_addr = lsa->src.ip.ip4,
		.dst_addr = lsa->dst.ip.ip4,
	};
	struct rte_ipv6_hdr v6 = {
		.vtc_flow = htonl(IP6_VERSION << 28),
		.proto = lsa->udp_encap ? IPPROTO_UDP : IPPROTO_ESP,
	};

	if (IS_IP6_TUNNEL(lsa->flags)) {
		v6.src_addr = lsa->src.ip.ip6;
		v6.dst_addr = lsa->dst.ip.ip6;
	}

	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
	if (rc == 0)
		rc = rte_ipsec_sa_init(sa, &prm, sa_size);
	if (rc < 0)
		return rc;

	if (lsa->flags & SA_TELEMETRY_ENABLE)
		rte_ipsec_telemetry_sa_add(sa);

	/* init primary processing session */
	ips = ipsec_get_primary_session(lsa);
	rc = fill_ipsec_session(ips, sa);
	if (rc != 0)
		return rc;

	/* init inline fallback processing session */
	if (lsa->fallback_sessions == 1) {
		struct rte_ipsec_session *ipfs = ipsec_get_fallback_session(lsa);
		if (ipfs->security.ses == NULL) {
			rc = create_lookaside_session(ips_ctx, skt_ctx, em_conf, lsa, ipfs);
			if (rc != 0)
				return rc;
		}
		rc = fill_ipsec_session(ipfs, sa);
	}

	return rc;
}
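
/*
 * rte_ipsec_sa is an opaque, variable-sized object, so the table below
 * is laid out as a raw byte array and indexed with the per-SA size
 * reported by rte_ipsec_sa_size().
 */
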
/*
 * Allocate space and init rte_ipsec_sa structures,
 * one per session.
 */
static int
ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket,
		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[],
		const struct eventmode_conf *em_conf)
{
	int32_t rc, sz;
	uint32_t i, idx;
	size_t tsz;
	struct rte_ipsec_sa *sa;
	struct ipsec_sa *lsa;
	struct rte_ipsec_sa_prm prm;

	/* determine SA size */
	idx = 0;
	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0) {
		RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
			"failed to determine SA size, error code: %d\n",
			__func__, ctx, nb_ent, socket, sz);
		return sz;
	}

	tsz = sz * nb_ent;

	ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
	if (ctx->satbl == NULL) {
		RTE_LOG(ERR, IPSEC,
			"%s(%p, %u, %d): failed to allocate %zu bytes\n",
			__func__, ctx, nb_ent, socket, tsz);
		return -ENOMEM;
	}

	rc = 0;
	for (i = 0; i != nb_ent && rc == 0; i++) {

		idx = i;

		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
		lsa = ctx->sa + idx;

		rc = ipsec_sa_init(lsa, sa, sz, skt_ctx, ips_ctx, em_conf);
	}

	return rc;
}

static int
sa_cmp(const void *p, const void *q)
{
	uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
	uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;

	/* three-way compare; plain subtraction could wrap for SPI
	 * values more than INT32_MAX apart
	 */
	return (spi1 > spi2) - (spi1 < spi2);
}

/*
 * Search the (SPI-sorted) SA table for an SA with the given SPI.
 */
int
sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
{
	uint32_t num;
	struct ipsec_sa *sa;
	struct ipsec_sa tmpl;
	const struct ipsec_sa *sar;

	sar = sa_ctx->sa;
	if (inbound != 0)
		num = nb_sa_in;
	else
		num = nb_sa_out;

	tmpl.spi = spi;

	sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
	if (sa != NULL)
		return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);

	return -ENOENT;
}
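
/*
 * Create and populate the per-socket inbound/outbound SA databases:
 * the SA contexts, the SAD used for inbound lookups and, when
 * librte_ipsec processing is enabled, the rte_ipsec_sa table.
 */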
void
sa_init(struct socket_ctx *ctx, int32_t socket_id,
		struct lcore_conf *lcore_conf,
		const struct eventmode_conf *em_conf)
{
	int32_t rc;
	const char *name;
	uint32_t lcore_id;
	struct ipsec_ctx *ipsec_ctx[RTE_MAX_LCORE];

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ctx->sa_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (nb_sa_in > 0) {
		name = "sa_in";
		ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
		if (ctx->sa_in == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
				&sa_in_cnt);
		if (rc != 0)
			rte_exit(EXIT_FAILURE, "failed to init SAD\n");
		RTE_LCORE_FOREACH(lcore_id)
			ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].inbound;
		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx, em_conf);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
				socket_id, ctx, ipsec_ctx, em_conf);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init inbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");

	if (nb_sa_out > 0) {
		name = "sa_out";
		ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
		if (ctx->sa_out == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		RTE_LCORE_FOREACH(lcore_id)
			ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].outbound;
		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx, em_conf);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
				socket_id, ctx, ipsec_ctx, em_conf);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init outbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
			"specified\n");
}
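
/*
 * Sanity check: confirm that the SA recorded in the packet's private
 * metadata is the one the matched inbound SP rule points at.
 */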
int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(m);
	sa = priv->sa;
	if (sa != NULL)
		return (sa_ctx->sa[sa_idx].spi == sa->spi);

	RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
	return 0;
}

void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
		void *sa_arr[], uint16_t nb_pkts)
{
	uint32_t i;
	void *result_sa;
	struct ipsec_sa *sa;

	sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);

	/*
	 * Mark the need for an inline offload fallback in the LSB of the
	 * SA pointer. Thanks to the packet grouping mechanism that
	 * ipsec_process uses, packets marked for fallback processing will
	 * form a separate group.
	 *
	 * Because the marked SA pointer is no longer safe to dereference,
	 * it is cast to a generic pointer to prevent unintentional use.
	 * Use ipsec_mask_saptr to get a valid struct pointer.
	 */
	for (i = 0; i < nb_pkts; i++) {
		if (sa_arr[i] == NULL)
			continue;

		result_sa = sa = sa_arr[i];
		if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
			sa->fallback_sessions > 0) {
			uintptr_t intsa = (uintptr_t)sa;
			intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
			result_sa = (void *)intsa;
		}
		sa_arr[i] = result_sa;
	}
}

void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
		void *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		sa[i] = &sa_ctx->sa[sa_idx[i]];
}

/*
 * Select HW offloads to be used.
 */
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
		uint64_t *tx_offloads, uint8_t *hw_reassembly)
{
	struct ipsec_sa *rule;
	uint32_t idx_sa;
	enum rte_security_session_action_type rule_type;
	struct rte_eth_dev_info dev_info;
	int ret;

	*rx_offloads = 0;
	*tx_offloads = 0;
	*hw_reassembly = 0;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			"Error during getting device (port %u) info: %s\n",
			port_id, strerror(-ret));

	/* Check for inbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
		rule = &sa_in[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule_type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
		if (IS_HW_REASSEMBLY_EN(rule->flags)) {
			*tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
			*hw_reassembly = 1;
		}
	}

	/* Check for outbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
		rule = &sa_out[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if (rule->portid == port_id) {
			switch (rule_type) {
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
				/* Checksum offload is not needed for inline
				 * protocol, as all processing for outbound
				 * IPsec packets is taken care of implicitly,
				 * and for non-IPsec packets there is no need
				 * for IPv4 checksum offload.
				 */
				*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
				if (rule->mss)
					*tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
							RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
				*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
				if (rule->mss)
					*tx_offloads |=
						RTE_ETH_TX_OFFLOAD_TCP_TSO;
				if (dev_info.tx_offload_capa &
						RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
					*tx_offloads |=
						RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
				break;
			default:
				/* Enable IPv4 checksum offload even if
				 * only lookaside SAs are present.
				 */
				if (dev_info.tx_offload_capa &
						RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
					*tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
				break;
			}
		} else {
			if (dev_info.tx_offload_capa &
					RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
				*tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
		}
	}
	return 0;
}

void
sa_sort_arr(void)
{
	qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
	qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
}

uint32_t
get_nb_crypto_sessions(void)
{
	return nb_crypto_sessions;
}