/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_security.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "test.h"
#include "test_cryptodev_security_ipsec.h"

#define IV_LEN_MAX 16

#ifndef IPVERSION
#define IPVERSION 4
#endif

struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
				  (RTE_DIM(cipher_list) *
				   RTE_DIM(auth_list))];

struct crypto_param_comb ah_alg_list[2 * (RTE_DIM(auth_list) - 1)];

static bool
is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
{
	/* The IP version number must be 4 */
	if (((pkt->version_ihl) >> 4) != 4)
		return false;
	/*
	 * The IP header length field must be large enough to hold the
	 * minimum length legal IP datagram (20 bytes = 5 words).
	 */
	if ((pkt->version_ihl & 0xf) < 5)
		return false;

	/*
	 * The IP total length field must be large enough to hold the IP
	 * datagram header, whose length is specified in the IP header length
	 * field.
	 */
	if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
		return false;

	return true;
}

static bool
is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
{
	/* The IP version number must be 6 */
	if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
		return false;

	return true;
}

void
test_ipsec_alg_list_populate(void)
{
	unsigned long i, j, index = 0;

	for (i = 0; i < RTE_DIM(aead_list); i++) {
		alg_list[index].param1 = &aead_list[i];
		alg_list[index].param2 = NULL;
		index++;
	}

	for (i = 0; i < RTE_DIM(cipher_list); i++) {
		for (j = 0; j < RTE_DIM(auth_list); j++) {
			alg_list[index].param1 = &cipher_list[i];
			alg_list[index].param2 = &auth_list[j];
			index++;
		}
	}
}

void
test_ipsec_ah_alg_list_populate(void)
{
	unsigned long i, index = 0;

	for (i = 1; i < RTE_DIM(auth_list); i++) {
		ah_alg_list[index].param1 = &auth_list[i];
		ah_alg_list[index].param2 = NULL;
		index++;
	}

	for (i = 1; i < RTE_DIM(auth_list); i++) {
		/* NULL cipher */
		ah_alg_list[index].param1 = &cipher_list[0];

		ah_alg_list[index].param2 = &auth_list[i];
		index++;
	}
}

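/*
 * Check each IPsec option requested in the SA configuration against the
 * capabilities reported by the security device. Returns -ENOTSUP (logging
 * the missing feature unless 'silent' is set) on the first unsupported
 * option, 0 when everything requested is available.
 */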
int
test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
			   const struct rte_security_capability *sec_cap,
			   bool silent)
{
	/* Verify security capabilities */

	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ESN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_encap == 1 &&
	    sec_cap->ipsec.options.udp_encap == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_ports_verify == 1 &&
	    sec_cap->ipsec.options.udp_ports_verify == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
				"verification is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_dscp == 1 &&
	    sec_cap->ipsec.options.copy_dscp == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_flabel == 1 &&
	    sec_cap->ipsec.options.copy_flabel == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_df == 1 &&
	    sec_cap->ipsec.options.copy_df == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DF bit is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.dec_ttl == 1 &&
	    sec_cap->ipsec.options.dec_ttl == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ECN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.stats == 1 &&
	    sec_cap->ipsec.options.stats == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Stats is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    (ipsec_xform->options.iv_gen_disable == 1) &&
	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Application provided IV is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    (ipsec_xform->options.tunnel_hdr_verify >
	     sec_cap->ipsec.options.tunnel_hdr_verify)) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Tunnel header verify is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ip_csum_enable == 1 &&
	    sec_cap->ipsec.options.ip_csum_enable == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inner IP checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.l4_csum_enable == 1 &&
	    sec_cap->ipsec.options.l4_csum_enable == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inner L4 checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
			if (!silent)
				RTE_LOG(INFO, USER1,
					"Replay window size is not supported\n");
			return -ENOTSUP;
		}
	}

	return 0;
}

int
test_ipsec_crypto_caps_aead_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *aead)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *crypto_cap;
	int j = 0;

	while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
		    crypto_cap->sym.xform_type == aead->type &&
		    crypto_cap->sym.aead.algo == aead->aead.algo) {
			sym_cap = &crypto_cap->sym;
			if (rte_cryptodev_sym_capability_check_aead(sym_cap,
					aead->aead.key.length,
					aead->aead.digest_length,
					aead->aead.aad_length,
					aead->aead.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

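/*
 * The cipher and auth capability checks below follow the same pattern as the
 * AEAD check above: walk the device's crypto_capabilities array until the
 * RTE_CRYPTO_OP_TYPE_UNDEFINED terminator and let the corresponding
 * rte_cryptodev_sym_capability_check_*() helper validate key, digest and IV
 * lengths.
 */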
int
test_ipsec_crypto_caps_cipher_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *cipher)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *cap;
	int j = 0;

	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
		    cap->sym.xform_type == cipher->type &&
		    cap->sym.cipher.algo == cipher->cipher.algo) {
			sym_cap = &cap->sym;
			if (rte_cryptodev_sym_capability_check_cipher(sym_cap,
					cipher->cipher.key.length,
					cipher->cipher.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

int
test_ipsec_crypto_caps_auth_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *auth)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *cap;
	int j = 0;

	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
		    cap->sym.xform_type == auth->type &&
		    cap->sym.auth.algo == auth->auth.algo) {
			sym_cap = &cap->sym;
			if (rte_cryptodev_sym_capability_check_auth(sym_cap,
					auth->auth.key.length,
					auth->auth.digest_length,
					auth->auth.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

void
test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
			  struct ipsec_test_data *td_in)
{
	memcpy(td_in, td_out, sizeof(*td_in));

	/* Populate output text of td_in with input text of td_out */
	memcpy(td_in->output_text.data, td_out->input_text.data,
	       td_out->input_text.len);
	td_in->output_text.len = td_out->input_text.len;

	/* Populate input text of td_in with output text of td_out */
	memcpy(td_in->input_text.data, td_out->output_text.data,
	       td_out->output_text.len);
	td_in->input_text.len = td_out->output_text.len;

	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

	if (td_in->aead) {
		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	} else {
		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		td_in->xform.chain.cipher.cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
	}
}

static bool
is_ipv4(void *ip)
{
	struct rte_ipv4_hdr *ipv4 = ip;
	uint8_t ip_ver;

	ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER;
	if (ip_ver == IPVERSION)
		return true;
	else
		return false;
}

static void
test_ipsec_csum_init(void *ip, bool l3, bool l4)
{
	struct rte_ipv4_hdr *ipv4;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	uint8_t next_proto;
	uint8_t size;

	if (is_ipv4(ip)) {
		ipv4 = ip;
		size = sizeof(struct rte_ipv4_hdr);
		next_proto = ipv4->next_proto_id;

		if (l3)
			ipv4->hdr_checksum = 0;
	} else {
		size = sizeof(struct rte_ipv6_hdr);
		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
	}

	if (l4) {
		switch (next_proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
			tcp->cksum = 0;
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
			udp->dgram_cksum = 0;
			break;
		default:
			return;
		}
	}
}

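/*
 * Build 'nb_td' test vectors from a known-good template (AES-256-GCM for
 * AEAD cases, AES-128-CBC with HMAC-SHA256 otherwise) and then override the
 * crypto transforms and IPsec options according to the algorithm parameters
 * and test flags.
 */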
void
test_ipsec_td_prepare(const struct crypto_param *param1,
		      const struct crypto_param *param2,
		      const struct ipsec_test_flags *flags,
		      struct ipsec_test_data *td_array,
		      int nb_td)
{
	struct ipsec_test_data *td;
	int i;

	memset(td_array, 0, nb_td * sizeof(*td));

	for (i = 0; i < nb_td; i++) {
		td = &td_array[i];

		/* Prepare fields based on param */

		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
			else
				memcpy(td, &pkt_aes_256_gcm, sizeof(*td));

			if (param1->alg.aead == RTE_CRYPTO_AEAD_AES_CCM)
				td->salt.len = 3;

			td->aead = true;
			td->xform.aead.aead.algo = param1->alg.aead;
			td->xform.aead.aead.key.length = param1->key_length;
		} else {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
				       sizeof(*td));
			else
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
				       sizeof(*td));

			td->aead = false;

			if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
				td->xform.chain.auth.auth.algo =
						param1->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param1->key_length;
				td->xform.chain.auth.auth.digest_length =
						param1->digest_length;
				td->auth_only = true;

				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
							param1->iv_length;
					td->aes_gmac = true;
				}
			} else {
				td->xform.chain.cipher.cipher.algo =
						param1->alg.cipher;
				td->xform.chain.cipher.cipher.key.length =
						param1->key_length;
				td->xform.chain.cipher.cipher.iv.length =
						param1->iv_length;
				td->xform.chain.auth.auth.algo =
						param2->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param2->key_length;
				td->xform.chain.auth.auth.digest_length =
						param2->digest_length;

				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
							param2->iv_length;
					td->aes_gmac = true;
				}
			}
		}

		if (flags->ah) {
			td->ipsec_xform.proto =
					RTE_SECURITY_IPSEC_SA_PROTO_AH;
		}

		if (flags->iv_gen)
			td->ipsec_xform.options.iv_gen_disable = 0;

		if (flags->sa_expiry_pkts_soft)
			td->ipsec_xform.life.packets_soft_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->ip_csum) {
			td->ipsec_xform.options.ip_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, true, false);
		}

		if (flags->l4_csum) {
			td->ipsec_xform.options.l4_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, false, true);
		}

		if (flags->transport) {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		} else {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

			if (flags->tunnel_ipv6)
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			else
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		}

		if (flags->stats_success)
			td->ipsec_xform.options.stats = 1;

		if (flags->fragment) {
			struct rte_ipv4_hdr *ip;
			ip = (struct rte_ipv4_hdr *)&td->input_text.data;
			ip->fragment_offset = 4;
			ip->hdr_checksum = rte_ipv4_cksum(ip);
		}

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
		    flags->df == TEST_IPSEC_COPY_DF_INNER_1)
			td->ipsec_xform.options.copy_df = 1;

		if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
		    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
			td->ipsec_xform.options.copy_dscp = 1;

		if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
		    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1)
			td->ipsec_xform.options.copy_flabel = 1;

		if (flags->dec_ttl_or_hop_limit)
			td->ipsec_xform.options.dec_ttl = 1;
	}
}

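/*
 * Derive inbound test vectors from the already prepared outbound ones: the
 * outbound plain text becomes the expected inbound output, and negative-test
 * or option flags (ICV corruption, hard expiry, UDP encapsulation, checksum
 * offloads, tunnel header verification) are applied on top.
 */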
void
test_ipsec_td_update(struct ipsec_test_data td_inb[],
		     const struct ipsec_test_data td_outb[],
		     int nb_td,
		     const struct ipsec_test_flags *flags)
{
	int i;

	for (i = 0; i < nb_td; i++) {
		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
		       td_outb[i].input_text.len);
		td_inb[i].output_text.len = td_outb[i].input_text.len;

		if (flags->icv_corrupt) {
			int icv_pos = td_inb[i].input_text.len - 4;
			td_inb[i].input_text.data[icv_pos] += 1;
		}

		if (flags->sa_expiry_pkts_hard)
			td_inb[i].ipsec_xform.life.packets_hard_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->udp_encap)
			td_inb[i].ipsec_xform.options.udp_encap = 1;

		if (flags->udp_ports_verify)
			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;

		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
			flags->tunnel_hdr_verify;

		if (flags->ip_csum)
			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;

		if (flags->l4_csum)
			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;

		/* Clear outbound specific flags */
		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
	}
}

void
test_ipsec_display_alg(const struct crypto_param *param1,
		       const struct crypto_param *param2)
{
	if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		printf("\t%s [%d]",
		       rte_crypto_aead_algorithm_strings[param1->alg.aead],
		       param1->key_length * 8);
	} else if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		printf("\t%s",
		       rte_crypto_auth_algorithm_strings[param1->alg.auth]);
		if (param1->alg.auth != RTE_CRYPTO_AUTH_NULL)
			printf(" [%dB ICV]", param1->digest_length);
	} else {
		printf("\t%s",
		       rte_crypto_cipher_algorithm_strings[param1->alg.cipher]);
		if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
			printf(" [%d]", param1->key_length * 8);
		printf(" %s",
		       rte_crypto_auth_algorithm_strings[param2->alg.auth]);
		if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
			printf(" [%dB ICV]", param2->digest_length);
	}
	printf("\n");
}

static int
test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
{
	int len = 0;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
				len += sizeof(struct rte_ipv4_hdr);
			else
				len += sizeof(struct rte_ipv6_hdr);
		}
	}

	return len;
}

static int
test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td)
{
	static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
	uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	int i, iv_pos, iv_len;
	static int index;

	if (td->aead)
		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
	else
		iv_len = td->xform.chain.cipher.cipher.iv.length;

	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
	output_text += iv_pos;

	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");

	/* Compare against previous values */
	for (i = 0; i < index; i++) {
		iv_tmp = &iv_queue[i * IV_LEN_MAX];

		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
			printf("IV repeated");
			return TEST_FAILED;
		}
	}

	/* Save IV for future comparisons */

	iv_tmp = &iv_queue[index * IV_LEN_MAX];
	memcpy(iv_tmp, output_text, iv_len);
	index++;

	if (index == IPSEC_TEST_PACKETS_MAX)
		index = 0;

	return TEST_SUCCESS;
}

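/*
 * Checksum helpers: recompute the inner IPv4 and L4 checksums of the
 * decapsulated packet and compare them with the values left in place by the
 * PMD when checksum offload is enabled.
 */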
static int
test_ipsec_l3_csum_verify(struct rte_mbuf *m)
{
	uint16_t actual_cksum, expected_cksum;
	struct rte_ipv4_hdr *ip;

	ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);

	if (!is_ipv4((void *)ip))
		return TEST_SKIPPED;

	actual_cksum = ip->hdr_checksum;

	ip->hdr_checksum = 0;

	expected_cksum = rte_ipv4_cksum(ip);

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

static int
test_ipsec_l4_csum_verify(struct rte_mbuf *m)
{
	uint16_t actual_cksum = 0, expected_cksum = 0;
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	void *ip, *l4;

	ip = rte_pktmbuf_mtod(m, void *);

	if (is_ipv4(ip)) {
		ipv4 = ip;
		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));

		switch (ipv4->next_proto_id) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		default:
			break;
		}
	} else {
		ipv6 = ip;
		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));

		switch (ipv6->proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		default:
			break;
		}
	}

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

static int
test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected)
{
	struct rte_ipv4_hdr *iph4_ex, *iph4_re;
	struct rte_ipv6_hdr *iph6_ex, *iph6_re;

	if (is_ipv4(received) && is_ipv4(expected)) {
		iph4_ex = expected;
		iph4_re = received;
		iph4_ex->time_to_live -= 1;
		if (iph4_re->time_to_live != iph4_ex->time_to_live)
			return TEST_FAILED;
	} else if (!is_ipv4(received) && !is_ipv4(expected)) {
		iph6_ex = expected;
		iph6_re = received;
		iph6_ex->hop_limits -= 1;
		if (iph6_re->hop_limits != iph6_ex->hop_limits)
			return TEST_FAILED;
	} else {
		printf("IP header version mismatch\n");
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

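/*
 * Verify a processed packet against the expected vector: strip UDP
 * encapsulation and tunnel headers where applicable, check the length,
 * checksum offload results and TTL/hop limit decrement, and byte-compare the
 * remaining payload.
 */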
static int
test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
		     bool silent, const struct ipsec_test_flags *flags)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	uint32_t skip, len = rte_pktmbuf_pkt_len(m);
	uint8_t td_output_text[4096];
	int ret;

	/* For tests with status as error for test success, skip verification */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	   (flags->icv_corrupt ||
	    flags->sa_expiry_pkts_hard ||
	    flags->tunnel_hdr_verify ||
	    td->ar_packet))
		return TEST_SUCCESS;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	    flags->udp_encap) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (td->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			iph4 = (const struct rte_ipv4_hdr *)output_text;
			if (iph4->next_proto_id != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}
		} else {
			iph6 = (const struct rte_ipv6_hdr *)output_text;
			if (iph6->proto != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}
		}

		len -= sizeof(struct rte_udp_hdr);
		output_text += sizeof(struct rte_udp_hdr);
	}

	if (len != td->output_text.len) {
		printf("Output length (%d) not matching with expected (%d)\n",
		       len, td->output_text.len);
		return TEST_FAILED;
	}

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    flags->fragment) {
		const struct rte_ipv4_hdr *iph4;
		iph4 = (const struct rte_ipv4_hdr *)output_text;
		if (iph4->fragment_offset) {
			printf("Output packet is fragmented");
			return TEST_FAILED;
		}
	}

	skip = test_ipsec_tunnel_hdr_len_get(td);

	len -= skip;
	output_text += skip;

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->ip_csum) {
		if (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
			ret = test_ipsec_l3_csum_verify(m);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner IP checksum test failed\n");

		return ret;
	}

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->l4_csum) {
		if (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
			ret = test_ipsec_l4_csum_verify(m);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner L4 checksum test failed\n");

		return ret;
	}

	memcpy(td_output_text, td->output_text.data + skip, len);

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->dec_ttl_or_hop_limit) {
		if (test_ipsec_ttl_or_hop_decrement_verify(output_text, td_output_text)) {
			printf("Inner TTL/hop limit decrement test failed\n");
			return TEST_FAILED;
		}
	}

	if (test_ipsec_pkt_update(td_output_text, flags)) {
		printf("Could not update expected vector");
		return TEST_FAILED;
	}

	if (memcmp(output_text, td_output_text, len)) {
		if (silent)
			return TEST_FAILED;

		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
		       "output text not as expected\n");

		rte_hexdump(stdout, "expected", td_output_text, len);
		rte_hexdump(stdout, "actual", output_text, len);
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

static int
test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td,
			 struct ipsec_test_data *res_d)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	uint32_t len = rte_pktmbuf_pkt_len(m);

	memcpy(res_d, td, sizeof(*res_d));
	memcpy(res_d->input_text.data, output_text, len);
	res_d->input_text.len = len;

	res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	if (res_d->aead) {
		res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	} else {
		res_d->xform.chain.cipher.cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
		res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
	}

	return TEST_SUCCESS;
}

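/*
 * Outer header validation for egress tunnel mode: check the IP version, the
 * AH protocol number when applicable, and the DF, DSCP and flow label fields
 * implied by the copy/set test flags.
 */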
static int
test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4,
			     const struct ipsec_test_flags *flags)
{
	uint8_t tos, dscp;
	uint16_t f_off;

	if (!is_valid_ipv4_pkt(iph4)) {
		printf("Tunnel outer header is not IPv4\n");
		return -1;
	}

	if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
		printf("Tunnel outer header proto is not AH\n");
		return -1;
	}

	f_off = rte_be_to_cpu_16(iph4->fragment_offset);
	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
		if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
			printf("DF bit is not set\n");
			return -1;
		}
	} else {
		if (f_off & RTE_IPV4_HDR_DF_FLAG) {
			printf("DF bit is set\n");
			return -1;
		}
	}

	tos = iph4->type_of_service;
	dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
		if (dscp != TEST_IPSEC_DSCP_VAL) {
			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_DSCP_VAL, dscp);
			return -1;
		}
	} else {
		if (dscp != 0) {
			printf("DSCP value is set [exp: 0, actual: %x]\n",
			       dscp);
			return -1;
		}
	}

	return 0;
}

static int
test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
			     const struct ipsec_test_flags *flags)
{
	uint32_t vtc_flow;
	uint32_t flabel;
	uint8_t dscp;

	if (!is_valid_ipv6_pkt(iph6)) {
		printf("Tunnel outer header is not IPv6\n");
		return -1;
	}

	vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
	dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
	       (RTE_IPV6_HDR_TC_SHIFT + 2);

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
		if (dscp != TEST_IPSEC_DSCP_VAL) {
			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_DSCP_VAL, dscp);
			return -1;
		}
	} else {
		if (dscp != 0) {
			printf("DSCP value is set [exp: 0, actual: %x]\n",
			       dscp);
			return -1;
		}
	}

	flabel = vtc_flow & RTE_IPV6_HDR_FL_MASK;

	if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
		if (flabel != TEST_IPSEC_FLABEL_VAL) {
			printf("FLABEL value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_FLABEL_VAL, flabel);
			return -1;
		}
	} else {
		if (flabel != 0) {
			printf("FLABEL value is set [exp: 0, actual: %x]\n",
			       flabel);
			return -1;
		}
	}

	return 0;
}

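/*
 * Entry point for result verification. On egress the outer (or transport)
 * header is validated first; the packet is then either compared against the
 * known vector or, in combined mode, copied into 'res_d' for subsequent
 * inbound processing.
 */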
int
test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td,
			struct ipsec_test_data *res_d, bool silent,
			const struct ipsec_test_flags *flags)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	int ret;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (flags->iv_gen) {
			ret = test_ipsec_iv_verify_push(m, td);
			if (ret != TEST_SUCCESS)
				return ret;
		}

		iph4 = (const struct rte_ipv4_hdr *)output_text;

		if (td->ipsec_xform.mode ==
				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
			if (flags->ipv6) {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (is_valid_ipv6_pkt(iph6) == false) {
					printf("Transport packet is not IPv6\n");
					return TEST_FAILED;
				}
			} else {
				if (is_valid_ipv4_pkt(iph4) == false) {
					printf("Transport packet is not IPv4\n");
					return TEST_FAILED;
				}

				if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
					printf("Transport IPv4 header proto is not AH\n");
					return -1;
				}
			}
		} else {
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
				if (test_ipsec_iph4_hdr_validate(iph4, flags))
					return TEST_FAILED;
			} else {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (test_ipsec_iph6_hdr_validate(iph6, flags))
					return TEST_FAILED;
			}
		}
	}

	/*
	 * In case of known vector tests & all inbound tests, res_d provided
	 * would be NULL and output data needs to be validated against expected.
	 * For inbound, output_text would be the plain packet and for outbound
	 * output_text would be the IPsec packet. Validate by comparing against
	 * known vectors.
	 *
	 * In case of combined mode tests, the output_text from outbound
	 * operation (ie, IPsec packet) would need to be inbound processed to
	 * obtain the plain text. Copy output_text to result data, 'res_d', so
	 * that inbound processing can be done.
	 */

	if (res_d == NULL)
		return test_ipsec_td_verify(m, td, silent, flags);
	else
		return test_ipsec_res_d_prepare(m, td, res_d);
}

int
test_ipsec_status_check(const struct ipsec_test_data *td,
			struct rte_crypto_op *op,
			const struct ipsec_test_flags *flags,
			enum rte_security_ipsec_sa_direction dir,
			int pkt_num)
{
	int ret = TEST_SUCCESS;

	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    td->ar_packet) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("Anti replay test case failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    flags->sa_expiry_pkts_hard &&
	    pkt_num == IPSEC_TEST_PACKETS_MAX) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("SA hard expiry (pkts) test failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->tunnel_hdr_verify) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("Tunnel header verify test case failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("ICV corruption test case failed\n");
			ret = TEST_FAILED;
		}
	} else {
		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			printf("Security op processing failed [pkt_num: %d]\n",
			       pkt_num);
			ret = TEST_FAILED;
		}
	}

	if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
		if (!(op->aux_flags &
		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
			printf("SA soft expiry (pkts) test failed\n");
			ret = TEST_FAILED;
		}
	}

	return ret;
}

int
test_ipsec_stats_verify(struct rte_security_ctx *ctx,
			void *sess,
			const struct ipsec_test_flags *flags,
			enum rte_security_ipsec_sa_direction dir)
{
	struct rte_security_stats stats = {0};
	int ret = TEST_SUCCESS;

	if (flags->stats_success) {
		if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
			return TEST_FAILED;

		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			if (stats.ipsec.opackets != 1 ||
			    stats.ipsec.oerrors != 0)
				ret = TEST_FAILED;
		} else {
			if (stats.ipsec.ipackets != 1 ||
			    stats.ipsec.ierrors != 0)
				ret = TEST_FAILED;
		}
	}

	return ret;
}

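/*
 * Apply the DF/DSCP/flow label mutations requested by the test flags to the
 * expected inner packet and refresh the IPv4 header checksum when needed, so
 * that the reference vector matches what the device is expected to produce.
 */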
int
test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
{
	struct rte_ipv4_hdr *iph4;
	struct rte_ipv6_hdr *iph6;
	bool cksum_dirty = false;

	iph4 = (struct rte_ipv4_hdr *)pkt;

	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
	    flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
	    flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
		uint16_t frag_off;

		if (!is_ipv4(iph4)) {
			printf("Invalid packet type\n");
			return -1;
		}

		frag_off = rte_be_to_cpu_16(iph4->fragment_offset);

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
		    flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
			frag_off |= RTE_IPV4_HDR_DF_FLAG;
		else
			frag_off &= ~RTE_IPV4_HDR_DF_FLAG;

		iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
		cksum_dirty = true;
	}

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
	    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0 ||
	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1 ||
	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {

		if (is_ipv4(iph4)) {
			uint8_t tos;

			tos = iph4->type_of_service;
			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				tos |= (RTE_IPV4_HDR_DSCP_MASK &
					(TEST_IPSEC_DSCP_VAL << 2));
			else
				tos &= ~RTE_IPV4_HDR_DSCP_MASK;

			iph4->type_of_service = tos;
			cksum_dirty = true;
		} else {
			uint32_t vtc_flow;

			iph6 = (struct rte_ipv6_hdr *)pkt;

			vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
					     (TEST_IPSEC_DSCP_VAL << (RTE_IPV6_HDR_TC_SHIFT + 2)));
			else
				vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;

			if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
			    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
				vtc_flow |= (RTE_IPV6_HDR_FL_MASK &
					     (TEST_IPSEC_FLABEL_VAL << RTE_IPV6_HDR_FL_SHIFT));
			else
				vtc_flow &= ~RTE_IPV6_HDR_FL_MASK;

			iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
		}
	}

	if (cksum_dirty && is_ipv4(iph4)) {
		iph4->hdr_checksum = 0;
		iph4->hdr_checksum = rte_ipv4_cksum(iph4);
	}

	return 0;
}