/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_security.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "test.h"
#include "test_cryptodev_security_ipsec.h"

#define IV_LEN_MAX 16
#define UDP_CUSTOM_SPORT 4650
#define UDP_CUSTOM_DPORT 4660

#ifndef IPVERSION
#define IPVERSION 4
#endif

struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
				  (RTE_DIM(cipher_list) *
				   RTE_DIM(auth_list))];

struct crypto_param_comb ah_alg_list[2 * (RTE_DIM(auth_list) - 1)];

static bool
is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
{
	/* The IP version number must be 4 */
	if (((pkt->version_ihl) >> 4) != 4)
		return false;
	/*
	 * The IP header length field must be large enough to hold the
	 * minimum length legal IP datagram (20 bytes = 5 words).
	 */
	if ((pkt->version_ihl & 0xf) < 5)
		return false;

	/*
	 * The IP total length field must be large enough to hold the IP
	 * datagram header, whose length is specified in the IP header length
	 * field.
	 */
	if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
		return false;

	return true;
}

static bool
is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
{
	/* The IP version number must be 6 */
	if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
		return false;

	return true;
}

void
test_ipsec_alg_list_populate(void)
{
	unsigned long i, j, index = 0;

	for (i = 0; i < RTE_DIM(aead_list); i++) {
		alg_list[index].param1 = &aead_list[i];
		alg_list[index].param2 = NULL;
		index++;
	}

	for (i = 0; i < RTE_DIM(cipher_list); i++) {
		for (j = 0; j < RTE_DIM(auth_list); j++) {
			alg_list[index].param1 = &cipher_list[i];
			alg_list[index].param2 = &auth_list[j];
			index++;
		}
	}
}

void
test_ipsec_ah_alg_list_populate(void)
{
	unsigned long i, index = 0;

	for (i = 1; i < RTE_DIM(auth_list); i++) {
		ah_alg_list[index].param1 = &auth_list[i];
		ah_alg_list[index].param2 = NULL;
		index++;
	}

	for (i = 1; i < RTE_DIM(auth_list); i++) {
		/* NULL cipher */
		ah_alg_list[index].param1 = &cipher_list[0];

		ah_alg_list[index].param2 = &auth_list[i];
		index++;
	}
}

int
test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
			   const struct rte_security_capability *sec_cap,
			   bool silent)
{
	/* Verify security capabilities */

	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ESN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_encap == 1 &&
	    sec_cap->ipsec.options.udp_encap == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_ports_verify == 1 &&
	    sec_cap->ipsec.options.udp_ports_verify == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
				"verification is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_dscp == 1 &&
	    sec_cap->ipsec.options.copy_dscp == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_flabel == 1 &&
	    sec_cap->ipsec.options.copy_flabel == 0) {
		if (!silent)
RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n"); 143 return -ENOTSUP; 144 } 145 146 if (ipsec_xform->options.copy_df == 1 && 147 sec_cap->ipsec.options.copy_df == 0) { 148 if (!silent) 149 RTE_LOG(INFO, USER1, "Copy DP bit is not supported\n"); 150 return -ENOTSUP; 151 } 152 153 if (ipsec_xform->options.dec_ttl == 1 && 154 sec_cap->ipsec.options.dec_ttl == 0) { 155 if (!silent) 156 RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n"); 157 return -ENOTSUP; 158 } 159 160 if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) { 161 if (!silent) 162 RTE_LOG(INFO, USER1, "ECN is not supported\n"); 163 return -ENOTSUP; 164 } 165 166 if (ipsec_xform->options.stats == 1 && 167 sec_cap->ipsec.options.stats == 0) { 168 if (!silent) 169 RTE_LOG(INFO, USER1, "Stats is not supported\n"); 170 return -ENOTSUP; 171 } 172 173 if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) && 174 (ipsec_xform->options.iv_gen_disable == 1) && 175 (sec_cap->ipsec.options.iv_gen_disable != 1)) { 176 if (!silent) 177 RTE_LOG(INFO, USER1, 178 "Application provided IV is not supported\n"); 179 return -ENOTSUP; 180 } 181 182 if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && 183 (ipsec_xform->options.tunnel_hdr_verify > 184 sec_cap->ipsec.options.tunnel_hdr_verify)) { 185 if (!silent) 186 RTE_LOG(INFO, USER1, 187 "Tunnel header verify is not supported\n"); 188 return -ENOTSUP; 189 } 190 191 if (ipsec_xform->options.ip_csum_enable == 1 && 192 sec_cap->ipsec.options.ip_csum_enable == 0) { 193 if (!silent) 194 RTE_LOG(INFO, USER1, 195 "Inner IP checksum is not supported\n"); 196 return -ENOTSUP; 197 } 198 199 if (ipsec_xform->options.l4_csum_enable == 1 && 200 sec_cap->ipsec.options.l4_csum_enable == 0) { 201 if (!silent) 202 RTE_LOG(INFO, USER1, 203 "Inner L4 checksum is not supported\n"); 204 return -ENOTSUP; 205 } 206 207 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 208 if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) { 209 if (!silent) 210 RTE_LOG(INFO, USER1, 211 "Replay window size is not supported\n"); 212 return -ENOTSUP; 213 } 214 } 215 216 return 0; 217 } 218 219 int 220 test_ipsec_crypto_caps_aead_verify( 221 const struct rte_security_capability *sec_cap, 222 struct rte_crypto_sym_xform *aead) 223 { 224 const struct rte_cryptodev_symmetric_capability *sym_cap; 225 const struct rte_cryptodev_capabilities *crypto_cap; 226 int j = 0; 227 228 while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op != 229 RTE_CRYPTO_OP_TYPE_UNDEFINED) { 230 if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC && 231 crypto_cap->sym.xform_type == aead->type && 232 crypto_cap->sym.aead.algo == aead->aead.algo) { 233 sym_cap = &crypto_cap->sym; 234 if (rte_cryptodev_sym_capability_check_aead(sym_cap, 235 aead->aead.key.length, 236 aead->aead.digest_length, 237 aead->aead.aad_length, 238 aead->aead.iv.length) == 0) 239 return 0; 240 } 241 } 242 243 return -ENOTSUP; 244 } 245 246 int 247 test_ipsec_crypto_caps_cipher_verify( 248 const struct rte_security_capability *sec_cap, 249 struct rte_crypto_sym_xform *cipher) 250 { 251 const struct rte_cryptodev_symmetric_capability *sym_cap; 252 const struct rte_cryptodev_capabilities *cap; 253 int j = 0; 254 255 while ((cap = &sec_cap->crypto_capabilities[j++])->op != 256 RTE_CRYPTO_OP_TYPE_UNDEFINED) { 257 if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC && 258 cap->sym.xform_type == cipher->type && 259 cap->sym.cipher.algo == cipher->cipher.algo) { 260 sym_cap = &cap->sym; 261 if 
					cipher->cipher.key.length,
					cipher->cipher.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

int
test_ipsec_crypto_caps_auth_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *auth)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *cap;
	int j = 0;

	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
		    cap->sym.xform_type == auth->type &&
		    cap->sym.auth.algo == auth->auth.algo) {
			sym_cap = &cap->sym;
			if (rte_cryptodev_sym_capability_check_auth(sym_cap,
					auth->auth.key.length,
					auth->auth.digest_length,
					auth->auth.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

void
test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
			  struct ipsec_test_data *td_in)
{
	memcpy(td_in, td_out, sizeof(*td_in));

	/* Populate output text of td_in with input text of td_out */
	memcpy(td_in->output_text.data, td_out->input_text.data,
	       td_out->input_text.len);
	td_in->output_text.len = td_out->input_text.len;

	/* Populate input text of td_in with output text of td_out */
	memcpy(td_in->input_text.data, td_out->output_text.data,
	       td_out->output_text.len);
	td_in->input_text.len = td_out->output_text.len;

	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

	if (td_in->aead) {
		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	} else {
		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		td_in->xform.chain.cipher.cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
	}
}

static bool
is_ipv4(void *ip)
{
	struct rte_ipv4_hdr *ipv4 = ip;
	uint8_t ip_ver;

	ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER;
	if (ip_ver == IPVERSION)
		return true;
	else
		return false;
}

static void
test_ipsec_csum_init(void *ip, bool l3, bool l4)
{
	struct rte_ipv4_hdr *ipv4;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	uint8_t next_proto;
	uint8_t size;

	if (is_ipv4(ip)) {
		ipv4 = ip;
		size = sizeof(struct rte_ipv4_hdr);
		next_proto = ipv4->next_proto_id;

		if (l3)
			ipv4->hdr_checksum = 0;
	} else {
		size = sizeof(struct rte_ipv6_hdr);
		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
	}

	if (l4) {
		switch (next_proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
			tcp->cksum = 0;
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
			udp->dgram_cksum = 0;
			break;
		default:
			return;
		}
	}
}

void
test_ipsec_td_prepare(const struct crypto_param *param1,
		      const struct crypto_param *param2,
		      const struct ipsec_test_flags *flags,
		      struct ipsec_test_data *td_array,
		      int nb_td)
{
	struct ipsec_test_data *td;
	int i;

	memset(td_array, 0, nb_td * sizeof(*td));

	for (i = 0; i < nb_td; i++) {
		td = &td_array[i];

		/* Prepare fields based on param */

		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
			else
				memcpy(td, &pkt_aes_256_gcm, sizeof(*td));

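			/* AES-CCM in ESP uses a 3 byte salt (RFC 4309) */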
			if (param1->alg.aead == RTE_CRYPTO_AEAD_AES_CCM)
				td->salt.len = 3;

			td->aead = true;
			td->xform.aead.aead.algo = param1->alg.aead;
			td->xform.aead.aead.key.length = param1->key_length;
		} else {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
					sizeof(*td));
			else
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
					sizeof(*td));

			td->aead = false;

			if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
				td->xform.chain.auth.auth.algo =
						param1->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param1->key_length;
				td->xform.chain.auth.auth.digest_length =
						param1->digest_length;
				td->auth_only = true;

				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
							param1->iv_length;
					td->aes_gmac = true;
				}
			} else {
				td->xform.chain.cipher.cipher.algo =
						param1->alg.cipher;
				td->xform.chain.cipher.cipher.key.length =
						param1->key_length;
				td->xform.chain.cipher.cipher.iv.length =
						param1->iv_length;
				td->xform.chain.auth.auth.algo =
						param2->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param2->key_length;
				td->xform.chain.auth.auth.digest_length =
						param2->digest_length;

				if (td->xform.chain.auth.auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
							param2->iv_length;
					td->aes_gmac = true;
				}
			}
		}

		if (flags->ah) {
			td->ipsec_xform.proto =
					RTE_SECURITY_IPSEC_SA_PROTO_AH;
		}

		if (flags->iv_gen)
			td->ipsec_xform.options.iv_gen_disable = 0;

		if (flags->sa_expiry_pkts_soft)
			td->ipsec_xform.life.packets_soft_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->ip_csum) {
			td->ipsec_xform.options.ip_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, true, false);
		}

		if (flags->l4_csum) {
			td->ipsec_xform.options.l4_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, false, true);
		}

		if (flags->transport) {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		} else {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

			if (flags->tunnel_ipv6)
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			else
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		}

		if (flags->stats_success)
			td->ipsec_xform.options.stats = 1;

		if (flags->fragment) {
			struct rte_ipv4_hdr *ip;
			ip = (struct rte_ipv4_hdr *)&td->input_text.data;
			ip->fragment_offset = 4;
			ip->hdr_checksum = rte_ipv4_cksum(ip);
		}

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
		    flags->df == TEST_IPSEC_COPY_DF_INNER_1)
			td->ipsec_xform.options.copy_df = 1;

		if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
		    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
			td->ipsec_xform.options.copy_dscp = 1;

		if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
		    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1)
			td->ipsec_xform.options.copy_flabel = 1;

		if (flags->dec_ttl_or_hop_limit)
			td->ipsec_xform.options.dec_ttl = 1;

		if (flags->udp_encap && flags->udp_encap_custom_ports) {
			td->ipsec_xform.udp.sport = UDP_CUSTOM_SPORT;
			td->ipsec_xform.udp.dport = UDP_CUSTOM_DPORT;
		}
	}
}

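/*
 * Derive inbound test data from the corresponding outbound vectors and apply
 * the options from the test flags that are relevant for decapsulation.
 */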
void
test_ipsec_td_update(struct ipsec_test_data td_inb[],
		     const struct ipsec_test_data td_outb[],
		     int nb_td,
		     const struct ipsec_test_flags *flags)
{
	int i;

	for (i = 0; i < nb_td; i++) {
		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
		       td_outb[i].input_text.len);
		td_inb[i].output_text.len = td_outb[i].input_text.len;

		if (flags->icv_corrupt) {
			int icv_pos = td_inb[i].input_text.len - 4;
			td_inb[i].input_text.data[icv_pos] += 1;
		}

		if (flags->sa_expiry_pkts_hard)
			td_inb[i].ipsec_xform.life.packets_hard_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->udp_encap)
			td_inb[i].ipsec_xform.options.udp_encap = 1;

		if (flags->udp_ports_verify)
			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;

		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
			flags->tunnel_hdr_verify;

		if (flags->ip_csum)
			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;

		if (flags->l4_csum)
			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;

		/* Clear outbound specific flags */
		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
	}
}

void
test_ipsec_display_alg(const struct crypto_param *param1,
		       const struct crypto_param *param2)
{
	if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		printf("\t%s [%d]",
		       rte_crypto_aead_algorithm_strings[param1->alg.aead],
		       param1->key_length * 8);
	} else if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		printf("\t%s",
		       rte_crypto_auth_algorithm_strings[param1->alg.auth]);
		if (param1->alg.auth != RTE_CRYPTO_AUTH_NULL)
			printf(" [%dB ICV]", param1->digest_length);
	} else {
		printf("\t%s",
		       rte_crypto_cipher_algorithm_strings[param1->alg.cipher]);
		if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
			printf(" [%d]", param1->key_length * 8);
		printf(" %s",
		       rte_crypto_auth_algorithm_strings[param2->alg.auth]);
		if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
			printf(" [%dB ICV]", param2->digest_length);
	}
	printf("\n");
}

static int
test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
{
	int len = 0;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
				len += sizeof(struct rte_ipv4_hdr);
			else
				len += sizeof(struct rte_ipv6_hdr);
		}
	}

	return len;
}

static int
test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td)
{
	static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
	uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	int i, iv_pos, iv_len;
	static int index;

	if (td->aead)
		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
	else
		iv_len = td->xform.chain.cipher.cipher.iv.length;

	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
	output_text += iv_pos;

	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");

	/* Compare against previous values */
	for (i = 0; i < index; i++) {
		iv_tmp = &iv_queue[i * IV_LEN_MAX];

		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
			printf("IV repeated");
			return TEST_FAILED;
		}
	}

	/* Save IV for future comparisons */

	iv_tmp = &iv_queue[index * IV_LEN_MAX];
	memcpy(iv_tmp, output_text, iv_len);
	index++;

	if (index == IPSEC_TEST_PACKETS_MAX)
		index = 0;

	return TEST_SUCCESS;
}

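/*
 * Verify the inner IPv4 header checksum reported by the PMD by zeroing the
 * field and recomputing it in software.
 */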
static int
test_ipsec_l3_csum_verify(struct rte_mbuf *m)
{
	uint16_t actual_cksum, expected_cksum;
	struct rte_ipv4_hdr *ip;

	ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);

	if (!is_ipv4((void *)ip))
		return TEST_SKIPPED;

	actual_cksum = ip->hdr_checksum;

	ip->hdr_checksum = 0;

	expected_cksum = rte_ipv4_cksum(ip);

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

static int
test_ipsec_l4_csum_verify(struct rte_mbuf *m)
{
	uint16_t actual_cksum = 0, expected_cksum = 0;
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	void *ip, *l4;

	ip = rte_pktmbuf_mtod(m, void *);

	if (is_ipv4(ip)) {
		ipv4 = ip;
		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));

		switch (ipv4->next_proto_id) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		default:
			break;
		}
	} else {
		ipv6 = ip;
		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));

		switch (ipv6->proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		default:
			break;
		}
	}

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

static int
test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected)
{
	struct rte_ipv4_hdr *iph4_ex, *iph4_re;
	struct rte_ipv6_hdr *iph6_ex, *iph6_re;

	if (is_ipv4(received) && is_ipv4(expected)) {
		iph4_ex = expected;
		iph4_re = received;
		iph4_ex->time_to_live -= 1;
		if (iph4_re->time_to_live != iph4_ex->time_to_live)
			return TEST_FAILED;
	} else if (!is_ipv4(received) && !is_ipv4(expected)) {
		iph6_ex = expected;
		iph6_re = received;
		iph6_ex->hop_limits -= 1;
		if (iph6_re->hop_limits != iph6_ex->hop_limits)
			return TEST_FAILED;
	} else {
		printf("IP header version mismatch\n");
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

static int
test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
		     bool silent, const struct ipsec_test_flags *flags)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	uint32_t skip, len = rte_pktmbuf_pkt_len(m);
	uint8_t td_output_text[4096];
	int ret;

	/* For tests with status as error for test success, skip verification */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    (flags->icv_corrupt ||
	     flags->sa_expiry_pkts_hard ||
	     flags->tunnel_hdr_verify ||
	     td->ar_packet))
		return TEST_SUCCESS;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	    flags->udp_encap) {

		len -= sizeof(struct rte_udp_hdr);
		output_text += sizeof(struct rte_udp_hdr);
	}

	if (len != td->output_text.len) {
(%d)\n", 782 len, td->output_text.len); 783 return TEST_FAILED; 784 } 785 786 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) && 787 flags->fragment) { 788 const struct rte_ipv4_hdr *iph4; 789 iph4 = (const struct rte_ipv4_hdr *)output_text; 790 if (iph4->fragment_offset) { 791 printf("Output packet is fragmented"); 792 return TEST_FAILED; 793 } 794 } 795 796 skip = test_ipsec_tunnel_hdr_len_get(td); 797 798 len -= skip; 799 output_text += skip; 800 801 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && 802 flags->ip_csum) { 803 if (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD) 804 ret = test_ipsec_l3_csum_verify(m); 805 else 806 ret = TEST_FAILED; 807 808 if (ret == TEST_FAILED) 809 printf("Inner IP checksum test failed\n"); 810 811 return ret; 812 } 813 814 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && 815 flags->l4_csum) { 816 if (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD) 817 ret = test_ipsec_l4_csum_verify(m); 818 else 819 ret = TEST_FAILED; 820 821 if (ret == TEST_FAILED) 822 printf("Inner L4 checksum test failed\n"); 823 824 return ret; 825 } 826 827 memcpy(td_output_text, td->output_text.data + skip, len); 828 829 if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && 830 flags->dec_ttl_or_hop_limit) { 831 if (test_ipsec_ttl_or_hop_decrement_verify(output_text, td_output_text)) { 832 printf("Inner TTL/hop limit decrement test failed\n"); 833 return TEST_FAILED; 834 } 835 } 836 837 if (test_ipsec_pkt_update(td_output_text, flags)) { 838 printf("Could not update expected vector"); 839 return TEST_FAILED; 840 } 841 842 if (memcmp(output_text, td_output_text, len)) { 843 if (silent) 844 return TEST_FAILED; 845 846 printf("TestCase %s line %d: %s\n", __func__, __LINE__, 847 "output text not as expected\n"); 848 849 rte_hexdump(stdout, "expected", td_output_text, len); 850 rte_hexdump(stdout, "actual", output_text, len); 851 return TEST_FAILED; 852 } 853 854 return TEST_SUCCESS; 855 } 856 857 static int 858 test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td, 859 struct ipsec_test_data *res_d) 860 { 861 uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *); 862 uint32_t len = rte_pktmbuf_pkt_len(m); 863 864 memcpy(res_d, td, sizeof(*res_d)); 865 memcpy(res_d->input_text.data, output_text, len); 866 res_d->input_text.len = len; 867 868 res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS; 869 if (res_d->aead) { 870 res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT; 871 } else { 872 res_d->xform.chain.cipher.cipher.op = 873 RTE_CRYPTO_CIPHER_OP_DECRYPT; 874 res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; 875 } 876 877 return TEST_SUCCESS; 878 } 879 880 static int 881 test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4, 882 const struct ipsec_test_flags *flags) 883 { 884 uint8_t tos, dscp; 885 uint16_t f_off; 886 887 if (!is_valid_ipv4_pkt(iph4)) { 888 printf("Tunnel outer header is not IPv4\n"); 889 return -1; 890 } 891 892 if (flags->ah && iph4->next_proto_id != IPPROTO_AH) { 893 printf("Tunnel outer header proto is not AH\n"); 894 return -1; 895 } 896 897 f_off = rte_be_to_cpu_16(iph4->fragment_offset); 898 if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 || 899 flags->df == TEST_IPSEC_SET_DF_1_INNER_0) { 900 if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) { 901 printf("DF bit is not set\n"); 902 return -1; 903 } 904 } else { 905 if (f_off & RTE_IPV4_HDR_DF_FLAG) { 906 printf("DF bit is set\n"); 907 return -1; 908 } 909 } 910 911 tos = iph4->type_of_service; 
	dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
		if (dscp != TEST_IPSEC_DSCP_VAL) {
			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_DSCP_VAL, dscp);
			return -1;
		}
	} else {
		if (dscp != 0) {
			printf("DSCP value is set [exp: 0, actual: %x]\n",
			       dscp);
			return -1;
		}
	}

	return 0;
}

static int
test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
			     const struct ipsec_test_flags *flags)
{
	uint32_t vtc_flow;
	uint32_t flabel;
	uint8_t dscp;

	if (!is_valid_ipv6_pkt(iph6)) {
		printf("Tunnel outer header is not IPv6\n");
		return -1;
	}

	vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
	dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
	       (RTE_IPV6_HDR_TC_SHIFT + 2);

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
		if (dscp != TEST_IPSEC_DSCP_VAL) {
			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_DSCP_VAL, dscp);
			return -1;
		}
	} else {
		if (dscp != 0) {
			printf("DSCP value is set [exp: 0, actual: %x]\n",
			       dscp);
			return -1;
		}
	}

	flabel = vtc_flow & RTE_IPV6_HDR_FL_MASK;

	if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {
		if (flabel != TEST_IPSEC_FLABEL_VAL) {
			printf("FLABEL value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_FLABEL_VAL, flabel);
			return -1;
		}
	} else {
		if (flabel != 0) {
			printf("FLABEL value is set [exp: 0, actual: %x]\n",
			       flabel);
			return -1;
		}
	}

	return 0;
}

int
test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td,
			struct ipsec_test_data *res_d, bool silent,
			const struct ipsec_test_flags *flags)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	int ret;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (flags->iv_gen) {
			ret = test_ipsec_iv_verify_push(m, td);
			if (ret != TEST_SUCCESS)
				return ret;
		}

		iph4 = (const struct rte_ipv4_hdr *)output_text;

		if (td->ipsec_xform.mode ==
				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
			if (flags->ipv6) {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (is_valid_ipv6_pkt(iph6) == false) {
					printf("Transport packet is not IPv6\n");
					return TEST_FAILED;
				}
			} else {
				if (is_valid_ipv4_pkt(iph4) == false) {
					printf("Transport packet is not IPv4\n");
					return TEST_FAILED;
				}

				if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
					printf("Transport IPv4 header proto is not AH\n");
					return -1;
				}
			}
		} else {
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
				if (test_ipsec_iph4_hdr_validate(iph4, flags))
					return TEST_FAILED;
			} else {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (test_ipsec_iph6_hdr_validate(iph6, flags))
					return TEST_FAILED;
			}
		}
	}

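	/*
	 * For UDP encapsulation, check that the outer header carries UDP and,
	 * when custom ports are requested, that the encapsulation ports match.
	 */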
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	    flags->udp_encap) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (td->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			iph4 = (const struct rte_ipv4_hdr *)output_text;

			if (iph4->next_proto_id != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}

			if (flags->udp_encap_custom_ports) {
				const struct rte_udp_hdr *udph;

				udph = (const struct rte_udp_hdr *)(output_text +
					sizeof(struct rte_ipv4_hdr));
				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
					printf("UDP custom ports not matching.\n");
					return TEST_FAILED;
				}
			}
		} else {
			iph6 = (const struct rte_ipv6_hdr *)output_text;

			if (iph6->proto != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}

			if (flags->udp_encap_custom_ports) {
				const struct rte_udp_hdr *udph;

				udph = (const struct rte_udp_hdr *)(output_text +
					sizeof(struct rte_ipv6_hdr));
				if ((rte_be_to_cpu_16(udph->src_port) != UDP_CUSTOM_SPORT) ||
				    (rte_be_to_cpu_16(udph->dst_port) != UDP_CUSTOM_DPORT)) {
					printf("UDP custom ports not matching.\n");
					return TEST_FAILED;
				}
			}
		}
	}

	/*
	 * In case of known vector tests & all inbound tests, res_d provided
	 * would be NULL and output data needs to be validated against expected.
	 * For inbound, output_text would be the plain packet and for outbound
	 * output_text would be the IPsec packet. Validate by comparing against
	 * known vectors.
	 *
	 * In case of combined mode tests, the output_text from the outbound
	 * operation (ie, the IPsec packet) would need to be inbound processed
	 * to obtain the plain text. Copy output_text to the result data,
	 * 'res_d', so that inbound processing can be done.
	 */

	if (res_d == NULL)
		return test_ipsec_td_verify(m, td, silent, flags);
	else
		return test_ipsec_res_d_prepare(m, td, res_d);
}

int
test_ipsec_status_check(const struct ipsec_test_data *td,
			struct rte_crypto_op *op,
			const struct ipsec_test_flags *flags,
			enum rte_security_ipsec_sa_direction dir,
			int pkt_num)
{
	int ret = TEST_SUCCESS;

	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    td->ar_packet) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("Anti replay test case failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    flags->sa_expiry_pkts_hard &&
	    pkt_num == IPSEC_TEST_PACKETS_MAX) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("SA hard expiry (pkts) test failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->tunnel_hdr_verify) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("Tunnel header verify test case failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("ICV corruption test case failed\n");
			ret = TEST_FAILED;
		}
	} else {
		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			printf("Security op processing failed [pkt_num: %d]\n",
			       pkt_num);
			ret = TEST_FAILED;
		}
	}

	if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
		if (!(op->aux_flags &
		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
			printf("SA soft expiry (pkts) test failed\n");
			ret = TEST_FAILED;
		}
	}

	return ret;
}

int
test_ipsec_stats_verify(struct rte_security_ctx *ctx,
			void *sess,
			const struct ipsec_test_flags *flags,
			enum rte_security_ipsec_sa_direction dir)
{
	struct rte_security_stats stats = {0};
	int ret = TEST_SUCCESS;

	if (flags->stats_success) {
		if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
			return TEST_FAILED;

		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			if (stats.ipsec.opackets != 1 ||
			    stats.ipsec.oerrors != 0)
				ret = TEST_FAILED;
		} else {
			if (stats.ipsec.ipackets != 1 ||
			    stats.ipsec.ierrors != 0)
				ret = TEST_FAILED;
		}
	}

	return ret;
}

int
test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
{
	struct rte_ipv4_hdr *iph4;
	struct rte_ipv6_hdr *iph6;
	bool cksum_dirty = false;

	iph4 = (struct rte_ipv4_hdr *)pkt;

	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
	    flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
	    flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
		uint16_t frag_off;

		if (!is_ipv4(iph4)) {
			printf("Invalid packet type\n");
			return -1;
		}

		frag_off = rte_be_to_cpu_16(iph4->fragment_offset);

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
		    flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
			frag_off |= RTE_IPV4_HDR_DF_FLAG;
		else
			frag_off &= ~RTE_IPV4_HDR_DF_FLAG;

		iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
		cksum_dirty = true;
	}

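	/* Set or clear DSCP (IPv4/IPv6) and flow label (IPv6) as per flags */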
	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
	    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0 ||
	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1 ||
	    flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_0 ||
	    flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) {

		if (is_ipv4(iph4)) {
			uint8_t tos;

			tos = iph4->type_of_service;
			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				tos |= (RTE_IPV4_HDR_DSCP_MASK &
					(TEST_IPSEC_DSCP_VAL << 2));
			else
				tos &= ~RTE_IPV4_HDR_DSCP_MASK;

			iph4->type_of_service = tos;
			cksum_dirty = true;
		} else {
			uint32_t vtc_flow;

			iph6 = (struct rte_ipv6_hdr *)pkt;

			vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
					     (TEST_IPSEC_DSCP_VAL << (RTE_IPV6_HDR_TC_SHIFT + 2)));
			else
				vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;

			if (flags->flabel == TEST_IPSEC_COPY_FLABEL_INNER_1 ||
			    flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
				vtc_flow |= (RTE_IPV6_HDR_FL_MASK &
					     (TEST_IPSEC_FLABEL_VAL << RTE_IPV6_HDR_FL_SHIFT));
			else
				vtc_flow &= ~RTE_IPV6_HDR_FL_MASK;

			iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
		}
	}

	if (cksum_dirty && is_ipv4(iph4)) {
		iph4->hdr_checksum = 0;
		iph4->hdr_checksum = rte_ipv4_cksum(iph4);
	}

	return 0;
}