/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_security.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "test.h"
#include "test_cryptodev_security_ipsec.h"

#define IV_LEN_MAX 16

#ifndef IPVERSION
#define IPVERSION 4
#endif

struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
				  (RTE_DIM(cipher_list) *
				   RTE_DIM(auth_list))];

struct crypto_param_comb ah_alg_list[2 * (RTE_DIM(auth_list) - 1)];

static bool
is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
{
	/* The IP version number must be 4 */
	if (((pkt->version_ihl) >> 4) != 4)
		return false;
	/*
	 * The IP header length field must be large enough to hold the
	 * minimum length legal IP datagram (20 bytes = 5 words).
	 */
	if ((pkt->version_ihl & 0xf) < 5)
		return false;

	/*
	 * The IP total length field must be large enough to hold the IP
	 * datagram header, whose length is specified in the IP header length
	 * field.
	 */
	if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
		return false;

	return true;
}

static bool
is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
{
	/* The IP version number must be 6 */
	if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
		return false;

	return true;
}

void
test_ipsec_alg_list_populate(void)
{
	unsigned long i, j, index = 0;

	for (i = 0; i < RTE_DIM(aead_list); i++) {
		alg_list[index].param1 = &aead_list[i];
		alg_list[index].param2 = NULL;
		index++;
	}

	for (i = 0; i < RTE_DIM(cipher_list); i++) {
		for (j = 0; j < RTE_DIM(auth_list); j++) {
			alg_list[index].param1 = &cipher_list[i];
			alg_list[index].param2 = &auth_list[j];
			index++;
		}
	}
}

void
test_ipsec_ah_alg_list_populate(void)
{
	unsigned long i, index = 0;

	for (i = 1; i < RTE_DIM(auth_list); i++) {
		ah_alg_list[index].param1 = &auth_list[i];
		ah_alg_list[index].param2 = NULL;
		index++;
	}

	for (i = 1; i < RTE_DIM(auth_list); i++) {
		/* NULL cipher */
		ah_alg_list[index].param1 = &cipher_list[0];

		ah_alg_list[index].param2 = &auth_list[i];
		index++;
	}
}

int
test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
			   const struct rte_security_capability *sec_cap,
			   bool silent)
{
	/* Verify security capabilities */

	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ESN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_encap == 1 &&
	    sec_cap->ipsec.options.udp_encap == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_ports_verify == 1 &&
	    sec_cap->ipsec.options.udp_ports_verify == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
				"verification is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_dscp == 1 &&
	    sec_cap->ipsec.options.copy_dscp == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_flabel == 1 &&
	    sec_cap->ipsec.options.copy_flabel == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_df == 1 &&
	    sec_cap->ipsec.options.copy_df == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DF bit is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.dec_ttl == 1 &&
	    sec_cap->ipsec.options.dec_ttl == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ECN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.stats == 1 &&
	    sec_cap->ipsec.options.stats == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Stats is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    (ipsec_xform->options.iv_gen_disable == 1) &&
	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Application provided IV is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    (ipsec_xform->options.tunnel_hdr_verify >
	     sec_cap->ipsec.options.tunnel_hdr_verify)) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Tunnel header verify is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ip_csum_enable == 1 &&
	    sec_cap->ipsec.options.ip_csum_enable == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inner IP checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.l4_csum_enable == 1 &&
	    sec_cap->ipsec.options.l4_csum_enable == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inner L4 checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Replay window size is not supported\n");
		return -ENOTSUP;
	}

	return 0;
}

int
test_ipsec_crypto_caps_aead_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *aead)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *crypto_cap;
	int j = 0;

	while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
		    crypto_cap->sym.xform_type == aead->type &&
		    crypto_cap->sym.aead.algo == aead->aead.algo) {
			sym_cap = &crypto_cap->sym;
			if (rte_cryptodev_sym_capability_check_aead(sym_cap,
					aead->aead.key.length,
					aead->aead.digest_length,
					aead->aead.aad_length,
					aead->aead.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

int
test_ipsec_crypto_caps_cipher_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *cipher)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *cap;
	int j = 0;

	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
		    cap->sym.xform_type == cipher->type &&
		    cap->sym.cipher.algo == cipher->cipher.algo) {
			sym_cap = &cap->sym;
			if (rte_cryptodev_sym_capability_check_cipher(sym_cap,
					cipher->cipher.key.length,
					cipher->cipher.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

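/*
 * Check that the security capability advertises support for the given auth
 * transform (algorithm, key length, digest length and IV length).
 */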
int
test_ipsec_crypto_caps_auth_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *auth)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *cap;
	int j = 0;

	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
		    cap->sym.xform_type == auth->type &&
		    cap->sym.auth.algo == auth->auth.algo) {
			sym_cap = &cap->sym;
			if (rte_cryptodev_sym_capability_check_auth(sym_cap,
					auth->auth.key.length,
					auth->auth.digest_length,
					auth->auth.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

void
test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
			  struct ipsec_test_data *td_in)
{
	memcpy(td_in, td_out, sizeof(*td_in));

	/* Populate output text of td_in with input text of td_out */
	memcpy(td_in->output_text.data, td_out->input_text.data,
	       td_out->input_text.len);
	td_in->output_text.len = td_out->input_text.len;

	/* Populate input text of td_in with output text of td_out */
	memcpy(td_in->input_text.data, td_out->output_text.data,
	       td_out->output_text.len);
	td_in->input_text.len = td_out->output_text.len;

	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

	if (td_in->aead) {
		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	} else {
		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		td_in->xform.chain.cipher.cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
	}
}

static bool
is_ipv4(void *ip)
{
	struct rte_ipv4_hdr *ipv4 = ip;
	uint8_t ip_ver;

	ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER;
	if (ip_ver == IPVERSION)
		return true;
	else
		return false;
}

static void
test_ipsec_csum_init(void *ip, bool l3, bool l4)
{
	struct rte_ipv4_hdr *ipv4;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	uint8_t next_proto;
	uint8_t size;

	if (is_ipv4(ip)) {
		ipv4 = ip;
		size = sizeof(struct rte_ipv4_hdr);
		next_proto = ipv4->next_proto_id;

		if (l3)
			ipv4->hdr_checksum = 0;
	} else {
		size = sizeof(struct rte_ipv6_hdr);
		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
	}

	if (l4) {
		switch (next_proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
			tcp->cksum = 0;
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
			udp->dgram_cksum = 0;
			break;
		default:
			return;
		}
	}
}

void
test_ipsec_td_prepare(const struct crypto_param *param1,
		      const struct crypto_param *param2,
		      const struct ipsec_test_flags *flags,
		      struct ipsec_test_data *td_array,
		      int nb_td)
{
	struct ipsec_test_data *td;
	int i;

	memset(td_array, 0, nb_td * sizeof(*td));

	for (i = 0; i < nb_td; i++) {
		td = &td_array[i];

		/* Prepare fields based on param */

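		/*
		 * AEAD algorithms provide both encryption and integrity
		 * protection, so a single AEAD transform is used; other
		 * combinations use a cipher and/or auth transform chain.
		 */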
		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
			else
				memcpy(td, &pkt_aes_256_gcm, sizeof(*td));

			if (param1->alg.aead == RTE_CRYPTO_AEAD_AES_CCM)
				td->salt.len = 3;

			td->aead = true;
			td->xform.aead.aead.algo = param1->alg.aead;
			td->xform.aead.aead.key.length = param1->key_length;
		} else {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
				       sizeof(*td));
			else
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
				       sizeof(*td));

			td->aead = false;

			if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
				td->xform.chain.auth.auth.algo =
						param1->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param1->key_length;
				td->xform.chain.auth.auth.digest_length =
						param1->digest_length;
				td->auth_only = true;

				if (td->xform.chain.auth.auth.algo ==
						RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
							param1->iv_length;
					td->aes_gmac = true;
				}
			} else {
				td->xform.chain.cipher.cipher.algo =
						param1->alg.cipher;
				td->xform.chain.cipher.cipher.key.length =
						param1->key_length;
				td->xform.chain.cipher.cipher.iv.length =
						param1->iv_length;
				td->xform.chain.auth.auth.algo =
						param2->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param2->key_length;
				td->xform.chain.auth.auth.digest_length =
						param2->digest_length;

				if (td->xform.chain.auth.auth.algo ==
						RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
							param2->iv_length;
					td->aes_gmac = true;
				}
			}
		}

		if (flags->ah) {
			td->ipsec_xform.proto =
					RTE_SECURITY_IPSEC_SA_PROTO_AH;
		}

		if (flags->iv_gen)
			td->ipsec_xform.options.iv_gen_disable = 0;

		if (flags->sa_expiry_pkts_soft)
			td->ipsec_xform.life.packets_soft_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->ip_csum) {
			td->ipsec_xform.options.ip_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, true, false);
		}

		if (flags->l4_csum) {
			td->ipsec_xform.options.l4_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, false, true);
		}

		if (flags->transport) {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		} else {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

			if (flags->tunnel_ipv6)
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			else
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		}

		if (flags->stats_success)
			td->ipsec_xform.options.stats = 1;

		if (flags->fragment) {
			struct rte_ipv4_hdr *ip;
			ip = (struct rte_ipv4_hdr *)&td->input_text.data;
			ip->fragment_offset = 4;
			ip->hdr_checksum = rte_ipv4_cksum(ip);
		}

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
		    flags->df == TEST_IPSEC_COPY_DF_INNER_1)
			td->ipsec_xform.options.copy_df = 1;

		if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
		    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
			td->ipsec_xform.options.copy_dscp = 1;

		if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
		    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1)
			td->ipsec_xform.options.copy_flabel = 1;

		if (flags->dec_ttl_or_hop_limit)
			td->ipsec_xform.options.dec_ttl = 1;
	}
}

void
test_ipsec_td_update(struct ipsec_test_data td_inb[],
		     const struct ipsec_test_data td_outb[],
		     int nb_td,
		     const struct ipsec_test_flags *flags)
{
	int i;

	for (i = 0; i < nb_td; i++) {
		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
		       td_outb[i].input_text.len);
		td_inb[i].output_text.len = td_outb[i].input_text.len;

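		/*
		 * Corrupt a byte near the end of the ESP packet (within the
		 * ICV) so that inbound integrity verification must fail.
		 */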
		if (flags->icv_corrupt) {
			int icv_pos = td_inb[i].input_text.len - 4;

			td_inb[i].input_text.data[icv_pos] += 1;
		}

		if (flags->sa_expiry_pkts_hard)
			td_inb[i].ipsec_xform.life.packets_hard_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->udp_encap)
			td_inb[i].ipsec_xform.options.udp_encap = 1;

		if (flags->udp_ports_verify)
			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;

		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
				flags->tunnel_hdr_verify;

		if (flags->ip_csum)
			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;

		if (flags->l4_csum)
			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;

		/* Clear outbound specific flags */
		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
	}
}

void
test_ipsec_display_alg(const struct crypto_param *param1,
		       const struct crypto_param *param2)
{
	if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		printf("\t%s [%d]",
		       rte_crypto_aead_algorithm_strings[param1->alg.aead],
		       param1->key_length * 8);
	} else if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		printf("\t%s",
		       rte_crypto_auth_algorithm_strings[param1->alg.auth]);
		if (param1->alg.auth != RTE_CRYPTO_AUTH_NULL)
			printf(" [%dB ICV]", param1->digest_length);
	} else {
		printf("\t%s",
		       rte_crypto_cipher_algorithm_strings[param1->alg.cipher]);
		if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
			printf(" [%d]", param1->key_length * 8);
		printf(" %s",
		       rte_crypto_auth_algorithm_strings[param2->alg.auth]);
		if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
			printf(" [%dB ICV]", param2->digest_length);
	}
	printf("\n");
}

static int
test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
{
	int len = 0;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
				len += sizeof(struct rte_ipv4_hdr);
			else
				len += sizeof(struct rte_ipv6_hdr);
		}
	}

	return len;
}

static int
test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td)
{
	static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
	uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	int i, iv_pos, iv_len;
	static int index;

	if (td->aead)
		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
	else
		iv_len = td->xform.chain.cipher.cipher.iv.length;

	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
	output_text += iv_pos;

	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");

	/* Compare against previous values */
	for (i = 0; i < index; i++) {
		iv_tmp = &iv_queue[i * IV_LEN_MAX];

		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
			printf("IV repeated\n");
			return TEST_FAILED;
		}
	}

	/* Save IV for future comparisons */
	iv_tmp = &iv_queue[index * IV_LEN_MAX];
	memcpy(iv_tmp, output_text, iv_len);
	index++;

	if (index == IPSEC_TEST_PACKETS_MAX)
		index = 0;

	return TEST_SUCCESS;
}

static int
test_ipsec_l3_csum_verify(struct rte_mbuf *m)
{
	uint16_t actual_cksum, expected_cksum;
	struct rte_ipv4_hdr *ip;

	ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);

	if (!is_ipv4((void *)ip))
		return TEST_SKIPPED;

	actual_cksum = ip->hdr_checksum;

	ip->hdr_checksum = 0;

	expected_cksum = rte_ipv4_cksum(ip);
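
	/*
	 * The checksum field was cleared above, so the recomputed value is
	 * what a correct inner header should carry.
	 */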
	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

static int
test_ipsec_l4_csum_verify(struct rte_mbuf *m)
{
	uint16_t actual_cksum = 0, expected_cksum = 0;
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	void *ip, *l4;

	ip = rte_pktmbuf_mtod(m, void *);

	if (is_ipv4(ip)) {
		ipv4 = ip;
		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));

		switch (ipv4->next_proto_id) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		default:
			break;
		}
	} else {
		ipv6 = ip;
		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));

		switch (ipv6->proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		default:
			break;
		}
	}

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

static int
test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected)
{
	struct rte_ipv4_hdr *iph4_ex, *iph4_re;
	struct rte_ipv6_hdr *iph6_ex, *iph6_re;

	if (is_ipv4(received) && is_ipv4(expected)) {
		iph4_ex = expected;
		iph4_re = received;
		iph4_ex->time_to_live -= 1;
		if (iph4_re->time_to_live != iph4_ex->time_to_live)
			return TEST_FAILED;
	} else if (!is_ipv4(received) && !is_ipv4(expected)) {
		iph6_ex = expected;
		iph6_re = received;
		iph6_ex->hop_limits -= 1;
		if (iph6_re->hop_limits != iph6_ex->hop_limits)
			return TEST_FAILED;
	} else {
		printf("IP header version mismatch\n");
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

static int
test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
		     bool silent, const struct ipsec_test_flags *flags)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	uint32_t skip, len = rte_pktmbuf_pkt_len(m);
	uint8_t td_output_text[4096];
	int ret;

	/* For tests with status as error for test success, skip verification */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    (flags->icv_corrupt ||
	     flags->sa_expiry_pkts_hard ||
	     flags->tunnel_hdr_verify ||
	     td->ar_packet))
		return TEST_SUCCESS;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	    flags->udp_encap) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (td->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			iph4 = (const struct rte_ipv4_hdr *)output_text;
			if (iph4->next_proto_id != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}
		} else {
			iph6 = (const struct rte_ipv6_hdr *)output_text;
			if (iph6->proto != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}
		}

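		/*
		 * Skip the UDP encapsulation header so that the remainder can
		 * be compared against the expected ESP packet.
		 */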
		len -= sizeof(struct rte_udp_hdr);
		output_text += sizeof(struct rte_udp_hdr);
	}

	if (len != td->output_text.len) {
		printf("Output length (%d) not matching with expected (%d)\n",
		       len, td->output_text.len);
		return TEST_FAILED;
	}

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    flags->fragment) {
		const struct rte_ipv4_hdr *iph4;
		iph4 = (const struct rte_ipv4_hdr *)output_text;
		if (iph4->fragment_offset) {
			printf("Output packet is fragmented\n");
			return TEST_FAILED;
		}
	}

	skip = test_ipsec_tunnel_hdr_len_get(td);

	len -= skip;
	output_text += skip;

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->ip_csum) {
		if (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
			ret = test_ipsec_l3_csum_verify(m);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner IP checksum test failed\n");

		return ret;
	}

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->l4_csum) {
		if (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
			ret = test_ipsec_l4_csum_verify(m);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner L4 checksum test failed\n");

		return ret;
	}

	memcpy(td_output_text, td->output_text.data + skip, len);

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->dec_ttl_or_hop_limit) {
		if (test_ipsec_ttl_or_hop_decrement_verify(output_text, td_output_text)) {
			printf("Inner TTL/hop limit decrement test failed\n");
			return TEST_FAILED;
		}
	}

	if (test_ipsec_pkt_update(td_output_text, flags)) {
		printf("Could not update expected vector\n");
		return TEST_FAILED;
	}

	if (memcmp(output_text, td_output_text, len)) {
		if (silent)
			return TEST_FAILED;

		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
		       "output text not as expected\n");

		rte_hexdump(stdout, "expected", td_output_text, len);
		rte_hexdump(stdout, "actual", output_text, len);
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

static int
test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td,
			 struct ipsec_test_data *res_d)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	uint32_t len = rte_pktmbuf_pkt_len(m);

	memcpy(res_d, td, sizeof(*res_d));
	memcpy(res_d->input_text.data, output_text, len);
	res_d->input_text.len = len;

	res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	if (res_d->aead) {
		res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	} else {
		res_d->xform.chain.cipher.cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
		res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
	}

	return TEST_SUCCESS;
}

static int
test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4,
			     const struct ipsec_test_flags *flags)
{
	uint8_t tos, dscp;
	uint16_t f_off;

	if (!is_valid_ipv4_pkt(iph4)) {
		printf("Tunnel outer header is not IPv4\n");
		return -1;
	}

	if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
		printf("Tunnel outer header proto is not AH\n");
		return -1;
	}

	f_off = rte_be_to_cpu_16(iph4->fragment_offset);
	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
		if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
			printf("DF bit is not set\n");
			return -1;
		}
	} else {
		if (f_off & RTE_IPV4_HDR_DF_FLAG) {
			printf("DF bit is set\n");
			return -1;
		}
	}

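	/* DSCP is carried in the upper six bits of the Type of Service byte */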
	tos = iph4->type_of_service;
	dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
		if (dscp != TEST_IPSEC_DSCP_VAL) {
			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_DSCP_VAL, dscp);
			return -1;
		}
	} else {
		if (dscp != 0) {
			printf("DSCP value is set [exp: 0, actual: %x]\n",
			       dscp);
			return -1;
		}
	}

	return 0;
}

static int
test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
			     const struct ipsec_test_flags *flags)
{
	uint32_t vtc_flow;
	uint32_t flabel;
	uint8_t dscp;

	if (!is_valid_ipv6_pkt(iph6)) {
		printf("Tunnel outer header is not IPv6\n");
		return -1;
	}

	vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
	dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
	       (RTE_IPV6_HDR_TC_SHIFT + 2);

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
		if (dscp != TEST_IPSEC_DSCP_VAL) {
			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_DSCP_VAL, dscp);
			return -1;
		}
	} else {
		if (dscp != 0) {
			printf("DSCP value is set [exp: 0, actual: %x]\n",
			       dscp);
			return -1;
		}
	}

	flabel = vtc_flow & RTE_IPV6_HDR_FL_MASK;

	if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
		if (flabel != TEST_IPSEC_FLABEL_VAL) {
			printf("FLABEL value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_FLABEL_VAL, flabel);
			return -1;
		}
	} else {
		if (flabel != 0) {
			printf("FLABEL value is set [exp: 0, actual: %x]\n",
			       flabel);
			return -1;
		}
	}

	return 0;
}

int
test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td,
			struct ipsec_test_data *res_d, bool silent,
			const struct ipsec_test_flags *flags)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	int ret;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (flags->iv_gen) {
			ret = test_ipsec_iv_verify_push(m, td);
			if (ret != TEST_SUCCESS)
				return ret;
		}

		iph4 = (const struct rte_ipv4_hdr *)output_text;

		if (td->ipsec_xform.mode ==
				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
			if (flags->ipv6) {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (is_valid_ipv6_pkt(iph6) == false) {
					printf("Transport packet is not IPv6\n");
					return TEST_FAILED;
				}
			} else {
				if (is_valid_ipv4_pkt(iph4) == false) {
					printf("Transport packet is not IPv4\n");
					return TEST_FAILED;
				}

				if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
					printf("Transport IPv4 header proto is not AH\n");
					return -1;
				}
			}
		} else {
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
				if (test_ipsec_iph4_hdr_validate(iph4, flags))
					return TEST_FAILED;
			} else {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (test_ipsec_iph6_hdr_validate(iph6, flags))
					return TEST_FAILED;
			}
		}
	}

	/*
	 * In case of known vector tests & all inbound tests, res_d provided
	 * would be NULL and output data needs to be validated against the
	 * expected. For inbound, output_text would be the plain packet and
	 * for outbound output_text would be the IPsec packet. Validate by
	 * comparing against known vectors.
	 *
	 * In case of combined mode tests, the output_text from outbound
	 * operation (ie, IPsec packet) would need to be inbound processed to
	 * obtain the plain text. Copy output_text to result data, 'res_d', so
	 * that inbound processing can be done.
	 */

	if (res_d == NULL)
		return test_ipsec_td_verify(m, td, silent, flags);
	else
		return test_ipsec_res_d_prepare(m, td, res_d);
}

int
test_ipsec_status_check(const struct ipsec_test_data *td,
			struct rte_crypto_op *op,
			const struct ipsec_test_flags *flags,
			enum rte_security_ipsec_sa_direction dir,
			int pkt_num)
{
	int ret = TEST_SUCCESS;

	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    td->ar_packet) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("Anti replay test case failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    flags->sa_expiry_pkts_hard &&
	    pkt_num == IPSEC_TEST_PACKETS_MAX) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("SA hard expiry (pkts) test failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->tunnel_hdr_verify) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("Tunnel header verify test case failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("ICV corruption test case failed\n");
			ret = TEST_FAILED;
		}
	} else {
		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			printf("Security op processing failed [pkt_num: %d]\n",
			       pkt_num);
			ret = TEST_FAILED;
		}
	}

	if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
		if (!(op->aux_flags &
		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
			printf("SA soft expiry (pkts) test failed\n");
			ret = TEST_FAILED;
		}
	}

	return ret;
}

int
test_ipsec_stats_verify(struct rte_security_ctx *ctx,
			struct rte_security_session *sess,
			const struct ipsec_test_flags *flags,
			enum rte_security_ipsec_sa_direction dir)
{
	struct rte_security_stats stats = {0};
	int ret = TEST_SUCCESS;

	if (flags->stats_success) {
		if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
			return TEST_FAILED;

		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			if (stats.ipsec.opackets != 1 ||
			    stats.ipsec.oerrors != 0)
				ret = TEST_FAILED;
		} else {
			if (stats.ipsec.ipackets != 1 ||
			    stats.ipsec.ierrors != 0)
				ret = TEST_FAILED;
		}
	}

	return ret;
}

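/*
 * Update the plain text vector's IP header (DF bit, DSCP, IPv6 Flow Label)
 * as per the test flags and refresh the IPv4 header checksum when the header
 * is modified.
 */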
int
test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
{
	struct rte_ipv4_hdr *iph4;
	struct rte_ipv6_hdr *iph6;
	bool cksum_dirty = false;

	iph4 = (struct rte_ipv4_hdr *)pkt;

	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
	    flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
	    flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
		uint16_t frag_off;

		if (!is_ipv4(iph4)) {
			printf("Invalid packet type\n");
			return -1;
		}

		frag_off = rte_be_to_cpu_16(iph4->fragment_offset);

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
		    flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
			frag_off |= RTE_IPV4_HDR_DF_FLAG;
		else
			frag_off &= ~RTE_IPV4_HDR_DF_FLAG;

		iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
		cksum_dirty = true;
	}

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
	    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0 ||
	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1 ||
	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {

		if (is_ipv4(iph4)) {
			uint8_t tos;

			tos = iph4->type_of_service;
			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				tos |= (RTE_IPV4_HDR_DSCP_MASK &
					(TEST_IPSEC_DSCP_VAL << 2));
			else
				tos &= ~RTE_IPV4_HDR_DSCP_MASK;

			iph4->type_of_service = tos;
			cksum_dirty = true;
		} else {
			uint32_t vtc_flow;

			iph6 = (struct rte_ipv6_hdr *)pkt;

			vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
					     (TEST_IPSEC_DSCP_VAL <<
					      (RTE_IPV6_HDR_TC_SHIFT + 2)));
			else
				vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;

			if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
			    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
				vtc_flow |= (RTE_IPV6_HDR_FL_MASK &
					     (TEST_IPSEC_FLABEL_VAL <<
					      RTE_IPV6_HDR_FL_SHIFT));
			else
				vtc_flow &= ~RTE_IPV6_HDR_FL_MASK;

			iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
		}
	}

	if (cksum_dirty && is_ipv4(iph4)) {
		iph4->hdr_checksum = 0;
		iph4->hdr_checksum = rte_ipv4_cksum(iph4);
	}

	return 0;
}