1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(C) 2021 Marvell. 3 */ 4 5 #ifndef RTE_EXEC_ENV_WINDOWS 6 7 #include <rte_common.h> 8 #include <rte_cryptodev.h> 9 #include <rte_esp.h> 10 #include <rte_ip.h> 11 #include <rte_security.h> 12 #include <rte_tcp.h> 13 #include <rte_udp.h> 14 15 #include "test.h" 16 #include "test_cryptodev_security_ipsec.h" 17 18 #define IV_LEN_MAX 16 19 20 struct crypto_param_comb alg_list[RTE_DIM(aead_list) + 21 (RTE_DIM(cipher_list) * 22 RTE_DIM(auth_list))]; 23 24 static bool 25 is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt) 26 { 27 /* The IP version number must be 4 */ 28 if (((pkt->version_ihl) >> 4) != 4) 29 return false; 30 /* 31 * The IP header length field must be large enough to hold the 32 * minimum length legal IP datagram (20 bytes = 5 words). 33 */ 34 if ((pkt->version_ihl & 0xf) < 5) 35 return false; 36 37 /* 38 * The IP total length field must be large enough to hold the IP 39 * datagram header, whose length is specified in the IP header length 40 * field. 41 */ 42 if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr)) 43 return false; 44 45 return true; 46 } 47 48 static bool 49 is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt) 50 { 51 /* The IP version number must be 6 */ 52 if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6) 53 return false; 54 55 return true; 56 } 57 58 void 59 test_ipsec_alg_list_populate(void) 60 { 61 unsigned long i, j, index = 0; 62 63 for (i = 0; i < RTE_DIM(aead_list); i++) { 64 alg_list[index].param1 = &aead_list[i]; 65 alg_list[index].param2 = NULL; 66 index++; 67 } 68 69 for (i = 0; i < RTE_DIM(cipher_list); i++) { 70 for (j = 0; j < RTE_DIM(auth_list); j++) { 71 alg_list[index].param1 = &cipher_list[i]; 72 alg_list[index].param2 = &auth_list[j]; 73 index++; 74 } 75 } 76 } 77 78 int 79 test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform, 80 const struct rte_security_capability *sec_cap, 81 bool silent) 82 { 83 /* Verify security capabilities */ 84 85 if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) { 86 if (!silent) 87 RTE_LOG(INFO, USER1, "ESN is not supported\n"); 88 return -ENOTSUP; 89 } 90 91 if (ipsec_xform->options.udp_encap == 1 && 92 sec_cap->ipsec.options.udp_encap == 0) { 93 if (!silent) 94 RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n"); 95 return -ENOTSUP; 96 } 97 98 if (ipsec_xform->options.udp_ports_verify == 1 && 99 sec_cap->ipsec.options.udp_ports_verify == 0) { 100 if (!silent) 101 RTE_LOG(INFO, USER1, "UDP encapsulation ports " 102 "verification is not supported\n"); 103 return -ENOTSUP; 104 } 105 106 if (ipsec_xform->options.copy_dscp == 1 && 107 sec_cap->ipsec.options.copy_dscp == 0) { 108 if (!silent) 109 RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n"); 110 return -ENOTSUP; 111 } 112 113 if (ipsec_xform->options.copy_flabel == 1 && 114 sec_cap->ipsec.options.copy_flabel == 0) { 115 if (!silent) 116 RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n"); 117 return -ENOTSUP; 118 } 119 120 if (ipsec_xform->options.copy_df == 1 && 121 sec_cap->ipsec.options.copy_df == 0) { 122 if (!silent) 123 RTE_LOG(INFO, USER1, "Copy DP bit is not supported\n"); 124 return -ENOTSUP; 125 } 126 127 if (ipsec_xform->options.dec_ttl == 1 && 128 sec_cap->ipsec.options.dec_ttl == 0) { 129 if (!silent) 130 RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n"); 131 return -ENOTSUP; 132 } 133 134 if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) { 135 if (!silent) 136 RTE_LOG(INFO, USER1, "ECN is not supported\n"); 137 return -ENOTSUP; 138 } 139 140 if (ipsec_xform->options.stats == 1 && 141 sec_cap->ipsec.options.stats == 0) { 142 if (!silent) 143 RTE_LOG(INFO, USER1, "Stats is not supported\n"); 144 return -ENOTSUP; 145 } 146 147 if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) && 148 (ipsec_xform->options.iv_gen_disable == 1) && 149 (sec_cap->ipsec.options.iv_gen_disable != 1)) { 150 if (!silent) 151 RTE_LOG(INFO, USER1, 152 "Application provided IV is not supported\n"); 153 return -ENOTSUP; 154 } 155 156 if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && 157 (ipsec_xform->options.tunnel_hdr_verify > 158 sec_cap->ipsec.options.tunnel_hdr_verify)) { 159 if (!silent) 160 RTE_LOG(INFO, USER1, 161 "Tunnel header verify is not supported\n"); 162 return -ENOTSUP; 163 } 164 165 if (ipsec_xform->options.ip_csum_enable == 1 && 166 sec_cap->ipsec.options.ip_csum_enable == 0) { 167 if (!silent) 168 RTE_LOG(INFO, USER1, 169 "Inner IP checksum is not supported\n"); 170 return -ENOTSUP; 171 } 172 173 if (ipsec_xform->options.l4_csum_enable == 1 && 174 sec_cap->ipsec.options.l4_csum_enable == 0) { 175 if (!silent) 176 RTE_LOG(INFO, USER1, 177 "Inner L4 checksum is not supported\n"); 178 return -ENOTSUP; 179 } 180 181 if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) { 182 if (!silent) 183 RTE_LOG(INFO, USER1, 184 "Replay window size is not supported\n"); 185 return -ENOTSUP; 186 } 187 188 return 0; 189 } 190 191 int 192 test_ipsec_crypto_caps_aead_verify( 193 const struct rte_security_capability *sec_cap, 194 struct rte_crypto_sym_xform *aead) 195 { 196 const struct rte_cryptodev_symmetric_capability *sym_cap; 197 const struct rte_cryptodev_capabilities *crypto_cap; 198 int j = 0; 199 200 while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op != 201 RTE_CRYPTO_OP_TYPE_UNDEFINED) { 202 if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC && 203 crypto_cap->sym.xform_type == aead->type && 204 crypto_cap->sym.aead.algo == aead->aead.algo) { 205 sym_cap = &crypto_cap->sym; 206 if (rte_cryptodev_sym_capability_check_aead(sym_cap, 207 aead->aead.key.length, 208 aead->aead.digest_length, 209 aead->aead.aad_length, 210 aead->aead.iv.length) == 0) 211 return 0; 212 } 213 } 214 215 return -ENOTSUP; 216 } 217 218 int 219 test_ipsec_crypto_caps_cipher_verify( 220 const struct rte_security_capability *sec_cap, 221 struct rte_crypto_sym_xform *cipher) 222 { 223 const struct rte_cryptodev_symmetric_capability *sym_cap; 224 const struct rte_cryptodev_capabilities *cap; 225 int j = 0; 226 227 while ((cap = &sec_cap->crypto_capabilities[j++])->op != 228 RTE_CRYPTO_OP_TYPE_UNDEFINED) { 229 if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC && 230 cap->sym.xform_type == cipher->type && 231 cap->sym.cipher.algo == cipher->cipher.algo) { 232 sym_cap = &cap->sym; 233 if (rte_cryptodev_sym_capability_check_cipher(sym_cap, 234 cipher->cipher.key.length, 235 cipher->cipher.iv.length) == 0) 236 return 0; 237 } 238 } 239 240 return -ENOTSUP; 241 } 242 243 int 244 test_ipsec_crypto_caps_auth_verify( 245 const struct rte_security_capability *sec_cap, 246 struct rte_crypto_sym_xform *auth) 247 { 248 const struct rte_cryptodev_symmetric_capability *sym_cap; 249 const struct rte_cryptodev_capabilities *cap; 250 int j = 0; 251 252 while ((cap = &sec_cap->crypto_capabilities[j++])->op != 253 RTE_CRYPTO_OP_TYPE_UNDEFINED) { 254 if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC && 255 cap->sym.xform_type == auth->type && 256 cap->sym.auth.algo == auth->auth.algo) { 257 sym_cap = &cap->sym; 258 if (rte_cryptodev_sym_capability_check_auth(sym_cap, 259 auth->auth.key.length, 260 auth->auth.digest_length, 261 auth->auth.iv.length) == 0) 262 return 0; 263 } 264 } 265 266 return -ENOTSUP; 267 } 268 269 void 270 test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out, 271 struct ipsec_test_data *td_in) 272 { 273 memcpy(td_in, td_out, sizeof(*td_in)); 274 275 /* Populate output text of td_in with input text of td_out */ 276 memcpy(td_in->output_text.data, td_out->input_text.data, 277 td_out->input_text.len); 278 td_in->output_text.len = td_out->input_text.len; 279 280 /* Populate input text of td_in with output text of td_out */ 281 memcpy(td_in->input_text.data, td_out->output_text.data, 282 td_out->output_text.len); 283 td_in->input_text.len = td_out->output_text.len; 284 285 td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS; 286 287 if (td_in->aead) { 288 td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT; 289 } else { 290 td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; 291 td_in->xform.chain.cipher.cipher.op = 292 RTE_CRYPTO_CIPHER_OP_DECRYPT; 293 } 294 } 295 296 static bool 297 is_ipv4(void *ip) 298 { 299 struct rte_ipv4_hdr *ipv4 = ip; 300 uint8_t ip_ver; 301 302 ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER; 303 if (ip_ver == IPVERSION) 304 return true; 305 else 306 return false; 307 } 308 309 static void 310 test_ipsec_csum_init(void *ip, bool l3, bool l4) 311 { 312 struct rte_ipv4_hdr *ipv4; 313 struct rte_tcp_hdr *tcp; 314 struct rte_udp_hdr *udp; 315 uint8_t next_proto; 316 uint8_t size; 317 318 if (is_ipv4(ip)) { 319 ipv4 = ip; 320 size = sizeof(struct rte_ipv4_hdr); 321 next_proto = ipv4->next_proto_id; 322 323 if (l3) 324 ipv4->hdr_checksum = 0; 325 } else { 326 size = sizeof(struct rte_ipv6_hdr); 327 next_proto = ((struct rte_ipv6_hdr *)ip)->proto; 328 } 329 330 if (l4) { 331 switch (next_proto) { 332 case IPPROTO_TCP: 333 tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size); 334 tcp->cksum = 0; 335 break; 336 case IPPROTO_UDP: 337 udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size); 338 udp->dgram_cksum = 0; 339 break; 340 default: 341 return; 342 } 343 } 344 } 345 346 void 347 test_ipsec_td_prepare(const struct crypto_param *param1, 348 const struct crypto_param *param2, 349 const struct ipsec_test_flags *flags, 350 struct ipsec_test_data *td_array, 351 int nb_td) 352 353 { 354 struct ipsec_test_data *td; 355 int i; 356 357 memset(td_array, 0, nb_td * sizeof(*td)); 358 359 for (i = 0; i < nb_td; i++) { 360 td = &td_array[i]; 361 362 /* Prepare fields based on param */ 363 364 if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 365 /* Copy template for packet & key fields */ 366 if (flags->ipv6) 367 memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td)); 368 else 369 memcpy(td, &pkt_aes_256_gcm, sizeof(*td)); 370 371 td->aead = true; 372 td->xform.aead.aead.algo = param1->alg.aead; 373 td->xform.aead.aead.key.length = param1->key_length; 374 } else { 375 /* Copy template for packet & key fields */ 376 if (flags->ipv6) 377 memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6, 378 sizeof(*td)); 379 else 380 memcpy(td, &pkt_aes_128_cbc_hmac_sha256, 381 sizeof(*td)); 382 383 td->aead = false; 384 td->xform.chain.cipher.cipher.algo = param1->alg.cipher; 385 td->xform.chain.cipher.cipher.key.length = 386 param1->key_length; 387 td->xform.chain.cipher.cipher.iv.length = 388 param1->iv_length; 389 td->xform.chain.auth.auth.algo = param2->alg.auth; 390 td->xform.chain.auth.auth.key.length = 391 param2->key_length; 392 td->xform.chain.auth.auth.digest_length = 393 param2->digest_length; 394 395 } 396 397 if (flags->iv_gen) 398 td->ipsec_xform.options.iv_gen_disable = 0; 399 400 if (flags->sa_expiry_pkts_soft) 401 td->ipsec_xform.life.packets_soft_limit = 402 IPSEC_TEST_PACKETS_MAX - 1; 403 404 if (flags->ip_csum) { 405 td->ipsec_xform.options.ip_csum_enable = 1; 406 test_ipsec_csum_init(&td->input_text.data, true, false); 407 } 408 409 if (flags->l4_csum) { 410 td->ipsec_xform.options.l4_csum_enable = 1; 411 test_ipsec_csum_init(&td->input_text.data, false, true); 412 } 413 414 if (flags->transport) { 415 td->ipsec_xform.mode = 416 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT; 417 } else { 418 td->ipsec_xform.mode = 419 RTE_SECURITY_IPSEC_SA_MODE_TUNNEL; 420 421 if (flags->tunnel_ipv6) 422 td->ipsec_xform.tunnel.type = 423 RTE_SECURITY_IPSEC_TUNNEL_IPV6; 424 else 425 td->ipsec_xform.tunnel.type = 426 RTE_SECURITY_IPSEC_TUNNEL_IPV4; 427 } 428 429 if (flags->stats_success) 430 td->ipsec_xform.options.stats = 1; 431 432 if (flags->fragment) { 433 struct rte_ipv4_hdr *ip; 434 ip = (struct rte_ipv4_hdr *)&td->input_text.data; 435 ip->fragment_offset = 4; 436 ip->hdr_checksum = rte_ipv4_cksum(ip); 437 } 438 439 if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 || 440 flags->df == TEST_IPSEC_COPY_DF_INNER_1) 441 td->ipsec_xform.options.copy_df = 1; 442 443 if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 || 444 flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1) 445 td->ipsec_xform.options.copy_dscp = 1; 446 447 if (flags->dec_ttl_or_hop_limit) 448 td->ipsec_xform.options.dec_ttl = 1; 449 } 450 } 451 452 void 453 test_ipsec_td_update(struct ipsec_test_data td_inb[], 454 const struct ipsec_test_data td_outb[], 455 int nb_td, 456 const struct ipsec_test_flags *flags) 457 { 458 int i; 459 460 for (i = 0; i < nb_td; i++) { 461 memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data, 462 td_outb[i].input_text.len); 463 td_inb[i].output_text.len = td_outb->input_text.len; 464 465 if (flags->icv_corrupt) { 466 int icv_pos = td_inb[i].input_text.len - 4; 467 td_inb[i].input_text.data[icv_pos] += 1; 468 } 469 470 if (flags->sa_expiry_pkts_hard) 471 td_inb[i].ipsec_xform.life.packets_hard_limit = 472 IPSEC_TEST_PACKETS_MAX - 1; 473 474 if (flags->udp_encap) 475 td_inb[i].ipsec_xform.options.udp_encap = 1; 476 477 if (flags->udp_ports_verify) 478 td_inb[i].ipsec_xform.options.udp_ports_verify = 1; 479 480 td_inb[i].ipsec_xform.options.tunnel_hdr_verify = 481 flags->tunnel_hdr_verify; 482 483 if (flags->ip_csum) 484 td_inb[i].ipsec_xform.options.ip_csum_enable = 1; 485 486 if (flags->l4_csum) 487 td_inb[i].ipsec_xform.options.l4_csum_enable = 1; 488 489 /* Clear outbound specific flags */ 490 td_inb[i].ipsec_xform.options.iv_gen_disable = 0; 491 } 492 } 493 494 void 495 test_ipsec_display_alg(const struct crypto_param *param1, 496 const struct crypto_param *param2) 497 { 498 if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 499 printf("\t%s [%d]", 500 rte_crypto_aead_algorithm_strings[param1->alg.aead], 501 param1->key_length * 8); 502 } else { 503 printf("\t%s", 504 rte_crypto_cipher_algorithm_strings[param1->alg.cipher]); 505 if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL) 506 printf(" [%d]", param1->key_length * 8); 507 printf(" %s", 508 rte_crypto_auth_algorithm_strings[param2->alg.auth]); 509 if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL) 510 printf(" [%dB ICV]", param2->digest_length); 511 } 512 printf("\n"); 513 } 514 515 static int 516 test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td) 517 { 518 int len = 0; 519 520 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 521 if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) { 522 if (td->ipsec_xform.tunnel.type == 523 RTE_SECURITY_IPSEC_TUNNEL_IPV4) 524 len += sizeof(struct rte_ipv4_hdr); 525 else 526 len += sizeof(struct rte_ipv6_hdr); 527 } 528 } 529 530 return len; 531 } 532 533 static int 534 test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td) 535 { 536 static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX]; 537 uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *); 538 int i, iv_pos, iv_len; 539 static int index; 540 541 if (td->aead) 542 iv_len = td->xform.aead.aead.iv.length - td->salt.len; 543 else 544 iv_len = td->xform.chain.cipher.cipher.iv.length; 545 546 iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr); 547 output_text += iv_pos; 548 549 TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported"); 550 551 /* Compare against previous values */ 552 for (i = 0; i < index; i++) { 553 iv_tmp = &iv_queue[i * IV_LEN_MAX]; 554 555 if (memcmp(output_text, iv_tmp, iv_len) == 0) { 556 printf("IV repeated"); 557 return TEST_FAILED; 558 } 559 } 560 561 /* Save IV for future comparisons */ 562 563 iv_tmp = &iv_queue[index * IV_LEN_MAX]; 564 memcpy(iv_tmp, output_text, iv_len); 565 index++; 566 567 if (index == IPSEC_TEST_PACKETS_MAX) 568 index = 0; 569 570 return TEST_SUCCESS; 571 } 572 573 static int 574 test_ipsec_l3_csum_verify(struct rte_mbuf *m) 575 { 576 uint16_t actual_cksum, expected_cksum; 577 struct rte_ipv4_hdr *ip; 578 579 ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *); 580 581 if (!is_ipv4((void *)ip)) 582 return TEST_SKIPPED; 583 584 actual_cksum = ip->hdr_checksum; 585 586 ip->hdr_checksum = 0; 587 588 expected_cksum = rte_ipv4_cksum(ip); 589 590 if (actual_cksum != expected_cksum) 591 return TEST_FAILED; 592 593 return TEST_SUCCESS; 594 } 595 596 static int 597 test_ipsec_l4_csum_verify(struct rte_mbuf *m) 598 { 599 uint16_t actual_cksum = 0, expected_cksum = 0; 600 struct rte_ipv4_hdr *ipv4; 601 struct rte_ipv6_hdr *ipv6; 602 struct rte_tcp_hdr *tcp; 603 struct rte_udp_hdr *udp; 604 void *ip, *l4; 605 606 ip = rte_pktmbuf_mtod(m, void *); 607 608 if (is_ipv4(ip)) { 609 ipv4 = ip; 610 l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr)); 611 612 switch (ipv4->next_proto_id) { 613 case IPPROTO_TCP: 614 tcp = (struct rte_tcp_hdr *)l4; 615 actual_cksum = tcp->cksum; 616 tcp->cksum = 0; 617 expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4); 618 break; 619 case IPPROTO_UDP: 620 udp = (struct rte_udp_hdr *)l4; 621 actual_cksum = udp->dgram_cksum; 622 udp->dgram_cksum = 0; 623 expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4); 624 break; 625 default: 626 break; 627 } 628 } else { 629 ipv6 = ip; 630 l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr)); 631 632 switch (ipv6->proto) { 633 case IPPROTO_TCP: 634 tcp = (struct rte_tcp_hdr *)l4; 635 actual_cksum = tcp->cksum; 636 tcp->cksum = 0; 637 expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4); 638 break; 639 case IPPROTO_UDP: 640 udp = (struct rte_udp_hdr *)l4; 641 actual_cksum = udp->dgram_cksum; 642 udp->dgram_cksum = 0; 643 expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4); 644 break; 645 default: 646 break; 647 } 648 } 649 650 if (actual_cksum != expected_cksum) 651 return TEST_FAILED; 652 653 return TEST_SUCCESS; 654 } 655 656 static int 657 test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected) 658 { 659 struct rte_ipv4_hdr *iph4_ex, *iph4_re; 660 struct rte_ipv6_hdr *iph6_ex, *iph6_re; 661 662 if (is_ipv4(received) && is_ipv4(expected)) { 663 iph4_ex = expected; 664 iph4_re = received; 665 iph4_ex->time_to_live -= 1; 666 if (iph4_re->time_to_live != iph4_ex->time_to_live) 667 return TEST_FAILED; 668 } else if (!is_ipv4(received) && !is_ipv4(expected)) { 669 iph6_ex = expected; 670 iph6_re = received; 671 iph6_ex->hop_limits -= 1; 672 if (iph6_re->hop_limits != iph6_ex->hop_limits) 673 return TEST_FAILED; 674 } else { 675 printf("IP header version miss match\n"); 676 return TEST_FAILED; 677 } 678 679 return TEST_SUCCESS; 680 } 681 682 static int 683 test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td, 684 bool silent, const struct ipsec_test_flags *flags) 685 { 686 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *); 687 uint32_t skip, len = rte_pktmbuf_pkt_len(m); 688 uint8_t td_output_text[4096]; 689 int ret; 690 691 /* For tests with status as error for test success, skip verification */ 692 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && 693 (flags->icv_corrupt || 694 flags->sa_expiry_pkts_hard || 695 flags->tunnel_hdr_verify || 696 td->ar_packet)) 697 return TEST_SUCCESS; 698 699 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS && 700 flags->udp_encap) { 701 const struct rte_ipv4_hdr *iph4; 702 const struct rte_ipv6_hdr *iph6; 703 704 if (td->ipsec_xform.tunnel.type == 705 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 706 iph4 = (const struct rte_ipv4_hdr *)output_text; 707 if (iph4->next_proto_id != IPPROTO_UDP) { 708 printf("UDP header is not found\n"); 709 return TEST_FAILED; 710 } 711 } else { 712 iph6 = (const struct rte_ipv6_hdr *)output_text; 713 if (iph6->proto != IPPROTO_UDP) { 714 printf("UDP header is not found\n"); 715 return TEST_FAILED; 716 } 717 } 718 719 len -= sizeof(struct rte_udp_hdr); 720 output_text += sizeof(struct rte_udp_hdr); 721 } 722 723 if (len != td->output_text.len) { 724 printf("Output length (%d) not matching with expected (%d)\n", 725 len, td->output_text.len); 726 return TEST_FAILED; 727 } 728 729 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) && 730 flags->fragment) { 731 const struct rte_ipv4_hdr *iph4; 732 iph4 = (const struct rte_ipv4_hdr *)output_text; 733 if (iph4->fragment_offset) { 734 printf("Output packet is fragmented"); 735 return TEST_FAILED; 736 } 737 } 738 739 skip = test_ipsec_tunnel_hdr_len_get(td); 740 741 len -= skip; 742 output_text += skip; 743 744 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && 745 flags->ip_csum) { 746 if (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD) 747 ret = test_ipsec_l3_csum_verify(m); 748 else 749 ret = TEST_FAILED; 750 751 if (ret == TEST_FAILED) 752 printf("Inner IP checksum test failed\n"); 753 754 return ret; 755 } 756 757 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && 758 flags->l4_csum) { 759 if (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD) 760 ret = test_ipsec_l4_csum_verify(m); 761 else 762 ret = TEST_FAILED; 763 764 if (ret == TEST_FAILED) 765 printf("Inner L4 checksum test failed\n"); 766 767 return ret; 768 } 769 770 memcpy(td_output_text, td->output_text.data + skip, len); 771 772 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && 773 flags->dec_ttl_or_hop_limit) { 774 if (test_ipsec_ttl_or_hop_decrement_verify(output_text, td_output_text)) { 775 printf("Inner TTL/hop limit decrement test failed\n"); 776 return TEST_FAILED; 777 } 778 } 779 780 if (test_ipsec_pkt_update(td_output_text, flags)) { 781 printf("Could not update expected vector"); 782 return TEST_FAILED; 783 } 784 785 if (memcmp(output_text, td_output_text, len)) { 786 if (silent) 787 return TEST_FAILED; 788 789 printf("TestCase %s line %d: %s\n", __func__, __LINE__, 790 "output text not as expected\n"); 791 792 rte_hexdump(stdout, "expected", td_output_text, len); 793 rte_hexdump(stdout, "actual", output_text, len); 794 return TEST_FAILED; 795 } 796 797 return TEST_SUCCESS; 798 } 799 800 static int 801 test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td, 802 struct ipsec_test_data *res_d) 803 { 804 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *); 805 uint32_t len = rte_pktmbuf_pkt_len(m); 806 807 memcpy(res_d, td, sizeof(*res_d)); 808 memcpy(res_d->input_text.data, output_text, len); 809 res_d->input_text.len = len; 810 811 res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS; 812 if (res_d->aead) { 813 res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT; 814 } else { 815 res_d->xform.chain.cipher.cipher.op = 816 RTE_CRYPTO_CIPHER_OP_DECRYPT; 817 res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; 818 } 819 820 return TEST_SUCCESS; 821 } 822 823 static int 824 test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4, 825 const struct ipsec_test_flags *flags) 826 { 827 uint8_t tos, dscp; 828 uint16_t f_off; 829 830 if (!is_valid_ipv4_pkt(iph4)) { 831 printf("Tunnel outer header is not IPv4\n"); 832 return -1; 833 } 834 835 f_off = rte_be_to_cpu_16(iph4->fragment_offset); 836 if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 || 837 flags->df == TEST_IPSEC_SET_DF_1_INNER_0) { 838 if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) { 839 printf("DF bit is not set\n"); 840 return -1; 841 } 842 } else { 843 if (f_off & RTE_IPV4_HDR_DF_FLAG) { 844 printf("DF bit is set\n"); 845 return -1; 846 } 847 } 848 849 tos = iph4->type_of_service; 850 dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2; 851 852 if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 || 853 flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) { 854 if (dscp != TEST_IPSEC_DSCP_VAL) { 855 printf("DSCP value is not matching [exp: %x, actual: %x]\n", 856 TEST_IPSEC_DSCP_VAL, dscp); 857 return -1; 858 } 859 } else { 860 if (dscp != 0) { 861 printf("DSCP value is set [exp: 0, actual: %x]\n", 862 dscp); 863 return -1; 864 } 865 } 866 867 return 0; 868 } 869 870 static int 871 test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6, 872 const struct ipsec_test_flags *flags) 873 { 874 uint32_t vtc_flow; 875 uint8_t dscp; 876 877 if (!is_valid_ipv6_pkt(iph6)) { 878 printf("Tunnel outer header is not IPv6\n"); 879 return -1; 880 } 881 882 vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow); 883 dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >> 884 (RTE_IPV6_HDR_TC_SHIFT + 2); 885 886 if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 || 887 flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) { 888 if (dscp != TEST_IPSEC_DSCP_VAL) { 889 printf("DSCP value is not matching [exp: %x, actual: %x]\n", 890 TEST_IPSEC_DSCP_VAL, dscp); 891 return -1; 892 } 893 } else { 894 if (dscp != 0) { 895 printf("DSCP value is set [exp: 0, actual: %x]\n", 896 dscp); 897 return -1; 898 } 899 } 900 901 return 0; 902 } 903 904 int 905 test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td, 906 struct ipsec_test_data *res_d, bool silent, 907 const struct ipsec_test_flags *flags) 908 { 909 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *); 910 int ret; 911 912 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 913 const struct rte_ipv4_hdr *iph4; 914 const struct rte_ipv6_hdr *iph6; 915 916 if (flags->iv_gen) { 917 ret = test_ipsec_iv_verify_push(m, td); 918 if (ret != TEST_SUCCESS) 919 return ret; 920 } 921 922 iph4 = (const struct rte_ipv4_hdr *)output_text; 923 924 if (td->ipsec_xform.mode == 925 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) { 926 if (flags->ipv6) { 927 iph6 = (const struct rte_ipv6_hdr *)output_text; 928 if (is_valid_ipv6_pkt(iph6) == false) { 929 printf("Transport packet is not IPv6\n"); 930 return TEST_FAILED; 931 } 932 } else { 933 if (is_valid_ipv4_pkt(iph4) == false) { 934 printf("Transport packet is not IPv4\n"); 935 return TEST_FAILED; 936 } 937 } 938 } else { 939 if (td->ipsec_xform.tunnel.type == 940 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 941 if (test_ipsec_iph4_hdr_validate(iph4, flags)) 942 return TEST_FAILED; 943 } else { 944 iph6 = (const struct rte_ipv6_hdr *)output_text; 945 if (test_ipsec_iph6_hdr_validate(iph6, flags)) 946 return TEST_FAILED; 947 } 948 } 949 } 950 951 /* 952 * In case of known vector tests & all inbound tests, res_d provided 953 * would be NULL and output data need to be validated against expected. 954 * For inbound, output_text would be plain packet and for outbound 955 * output_text would IPsec packet. Validate by comparing against 956 * known vectors. 957 * 958 * In case of combined mode tests, the output_text from outbound 959 * operation (ie, IPsec packet) would need to be inbound processed to 960 * obtain the plain text. Copy output_text to result data, 'res_d', so 961 * that inbound processing can be done. 962 */ 963 964 if (res_d == NULL) 965 return test_ipsec_td_verify(m, td, silent, flags); 966 else 967 return test_ipsec_res_d_prepare(m, td, res_d); 968 } 969 970 int 971 test_ipsec_status_check(const struct ipsec_test_data *td, 972 struct rte_crypto_op *op, 973 const struct ipsec_test_flags *flags, 974 enum rte_security_ipsec_sa_direction dir, 975 int pkt_num) 976 { 977 int ret = TEST_SUCCESS; 978 979 if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && 980 td->ar_packet) { 981 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) { 982 printf("Anti replay test case failed\n"); 983 return TEST_FAILED; 984 } else { 985 return TEST_SUCCESS; 986 } 987 } 988 989 if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && 990 flags->sa_expiry_pkts_hard && 991 pkt_num == IPSEC_TEST_PACKETS_MAX) { 992 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) { 993 printf("SA hard expiry (pkts) test failed\n"); 994 return TEST_FAILED; 995 } else { 996 return TEST_SUCCESS; 997 } 998 } 999 1000 if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && 1001 flags->tunnel_hdr_verify) { 1002 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) { 1003 printf("Tunnel header verify test case failed\n"); 1004 return TEST_FAILED; 1005 } else { 1006 return TEST_SUCCESS; 1007 } 1008 } 1009 1010 if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) { 1011 if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) { 1012 printf("ICV corruption test case failed\n"); 1013 ret = TEST_FAILED; 1014 } 1015 } else { 1016 if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { 1017 printf("Security op processing failed [pkt_num: %d]\n", 1018 pkt_num); 1019 ret = TEST_FAILED; 1020 } 1021 } 1022 1023 if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) { 1024 if (!(op->aux_flags & 1025 RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) { 1026 printf("SA soft expiry (pkts) test failed\n"); 1027 ret = TEST_FAILED; 1028 } 1029 } 1030 1031 return ret; 1032 } 1033 1034 int 1035 test_ipsec_stats_verify(struct rte_security_ctx *ctx, 1036 struct rte_security_session *sess, 1037 const struct ipsec_test_flags *flags, 1038 enum rte_security_ipsec_sa_direction dir) 1039 { 1040 struct rte_security_stats stats = {0}; 1041 int ret = TEST_SUCCESS; 1042 1043 if (flags->stats_success) { 1044 if (rte_security_session_stats_get(ctx, sess, &stats) < 0) 1045 return TEST_FAILED; 1046 1047 if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 1048 if (stats.ipsec.opackets != 1 || 1049 stats.ipsec.oerrors != 0) 1050 ret = TEST_FAILED; 1051 } else { 1052 if (stats.ipsec.ipackets != 1 || 1053 stats.ipsec.ierrors != 0) 1054 ret = TEST_FAILED; 1055 } 1056 } 1057 1058 return ret; 1059 } 1060 1061 int 1062 test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags) 1063 { 1064 struct rte_ipv4_hdr *iph4; 1065 struct rte_ipv6_hdr *iph6; 1066 bool cksum_dirty = false; 1067 1068 iph4 = (struct rte_ipv4_hdr *)pkt; 1069 1070 if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 || 1071 flags->df == TEST_IPSEC_SET_DF_0_INNER_1 || 1072 flags->df == TEST_IPSEC_COPY_DF_INNER_0 || 1073 flags->df == TEST_IPSEC_SET_DF_1_INNER_0) { 1074 uint16_t frag_off; 1075 1076 if (!is_ipv4(iph4)) { 1077 printf("Invalid packet type\n"); 1078 return -1; 1079 } 1080 1081 frag_off = rte_be_to_cpu_16(iph4->fragment_offset); 1082 1083 if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 || 1084 flags->df == TEST_IPSEC_SET_DF_0_INNER_1) 1085 frag_off |= RTE_IPV4_HDR_DF_FLAG; 1086 else 1087 frag_off &= ~RTE_IPV4_HDR_DF_FLAG; 1088 1089 iph4->fragment_offset = rte_cpu_to_be_16(frag_off); 1090 cksum_dirty = true; 1091 } 1092 1093 if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 || 1094 flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 || 1095 flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 || 1096 flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) { 1097 1098 if (is_ipv4(iph4)) { 1099 uint8_t tos; 1100 1101 tos = iph4->type_of_service; 1102 if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 || 1103 flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1) 1104 tos |= (RTE_IPV4_HDR_DSCP_MASK & 1105 (TEST_IPSEC_DSCP_VAL << 2)); 1106 else 1107 tos &= ~RTE_IPV4_HDR_DSCP_MASK; 1108 1109 iph4->type_of_service = tos; 1110 cksum_dirty = true; 1111 } else { 1112 uint32_t vtc_flow; 1113 1114 iph6 = (struct rte_ipv6_hdr *)pkt; 1115 1116 vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow); 1117 if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 || 1118 flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1) 1119 vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK & 1120 (TEST_IPSEC_DSCP_VAL << (RTE_IPV6_HDR_TC_SHIFT + 2))); 1121 else 1122 vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK; 1123 1124 iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow); 1125 } 1126 } 1127 1128 if (cksum_dirty && is_ipv4(iph4)) { 1129 iph4->hdr_checksum = 0; 1130 iph4->hdr_checksum = rte_ipv4_cksum(iph4); 1131 } 1132 1133 return 0; 1134 } 1135 1136 #endif /* !RTE_EXEC_ENV_WINDOWS */ 1137