1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2023 Corigine Systems, Inc. 3 * All rights reserved. 4 */ 5 6 #include <stdalign.h> 7 8 #include "nfp_ipsec.h" 9 10 #include <rte_cryptodev.h> 11 #include <rte_malloc.h> 12 #include <rte_security_driver.h> 13 14 #include <ethdev_driver.h> 15 #include <ethdev_pci.h> 16 17 #include "nfp_logs.h" 18 #include "nfp_net_common.h" 19 #include "nfp_net_ctrl.h" 20 #include "nfp_rxtx.h" 21 #include "nfp_net_meta.h" 22 23 #define NFP_UDP_ESP_PORT 4500 24 #define NFP_ESP_IV_LENGTH 8 25 26 static const struct rte_cryptodev_capabilities nfp_crypto_caps[] = { 27 { 28 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, 29 .sym = { 30 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, 31 .auth = { 32 .algo = RTE_CRYPTO_AUTH_MD5_HMAC, 33 .block_size = 64, 34 .key_size = { 35 .min = 16, 36 .max = 16, 37 .increment = 0 38 }, 39 .digest_size = { 40 .min = 12, 41 .max = 16, 42 .increment = 4 43 }, 44 }, 45 }, 46 }, 47 { 48 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, 49 .sym = { 50 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, 51 .auth = { 52 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, 53 .block_size = 64, 54 .key_size = { 55 .min = 20, 56 .max = 64, 57 .increment = 1 58 }, 59 .digest_size = { 60 .min = 10, 61 .max = 12, 62 .increment = 2 63 }, 64 }, 65 }, 66 }, 67 { 68 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, 69 .sym = { 70 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, 71 .auth = { 72 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, 73 .block_size = 64, 74 .key_size = { 75 .min = 32, 76 .max = 32, 77 .increment = 0 78 }, 79 .digest_size = { 80 .min = 12, 81 .max = 16, 82 .increment = 4 83 }, 84 }, 85 }, 86 }, 87 { 88 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, 89 .sym = { 90 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, 91 .auth = { 92 .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, 93 .block_size = 128, 94 .key_size = { 95 .min = 48, 96 .max = 48, 97 .increment = 0 98 }, 99 .digest_size = { 100 .min = 12, 101 .max = 24, 102 .increment = 12 103 }, 104 }, 105 }, 106 }, 107 { 108 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, 109 .sym = { 110 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, 111 .auth = { 112 .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, 113 .block_size = 128, 114 .key_size = { 115 .min = 64, 116 .max = 64, 117 .increment = 1 118 }, 119 .digest_size = { 120 .min = 12, 121 .max = 32, 122 .increment = 4 123 }, 124 }, 125 }, 126 }, 127 { 128 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, 129 .sym = { 130 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, 131 .cipher = { 132 .algo = RTE_CRYPTO_CIPHER_3DES_CBC, 133 .block_size = 8, 134 .key_size = { 135 .min = 24, 136 .max = 24, 137 .increment = 0 138 }, 139 .iv_size = { 140 .min = 8, 141 .max = 16, 142 .increment = 8 143 }, 144 }, 145 }, 146 }, 147 { 148 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, 149 .sym = { 150 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, 151 .cipher = { 152 .algo = RTE_CRYPTO_CIPHER_AES_CBC, 153 .block_size = 16, 154 .key_size = { 155 .min = 16, 156 .max = 32, 157 .increment = 8 158 }, 159 .iv_size = { 160 .min = 8, 161 .max = 16, 162 .increment = 8 163 }, 164 }, 165 }, 166 }, 167 { 168 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, 169 .sym = { 170 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, 171 .aead = { 172 .algo = RTE_CRYPTO_AEAD_AES_GCM, 173 .block_size = 16, 174 .key_size = { 175 .min = 16, 176 .max = 32, 177 .increment = 8 178 }, 179 .digest_size = { 180 .min = 16, 181 .max = 16, 182 .increment = 0 183 }, 184 .aad_size = { 185 .min = 0, 186 .max = 1024, 187 .increment = 1 188 }, 189 .iv_size = { 190 .min = 8, 191 .max = 16, 192 .increment = 4 193 } 194 }, 195 }, 196 }, 197 { 198 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, 199 .sym = { 200 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, 201 .aead = { 202 .algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305, 203 .block_size = 16, 204 .key_size = { 205 .min = 32, 206 .max = 32, 207 .increment = 0 208 }, 209 .digest_size = { 210 .min = 16, 211 .max = 16, 212 .increment = 0 213 }, 214 .aad_size = { 215 .min = 0, 216 .max = 1024, 217 .increment = 1 218 }, 219 .iv_size = { 220 .min = 8, 221 .max = 16, 222 .increment = 4 223 } 224 }, 225 }, 226 }, 227 { 228 .op = RTE_CRYPTO_OP_TYPE_UNDEFINED, 229 .sym = { 230 .xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED 231 }, 232 } 233 }; 234 235 static const struct rte_security_capability nfp_security_caps[] = { 236 { /* IPsec Inline Crypto Tunnel Egress */ 237 .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, 238 .protocol = RTE_SECURITY_PROTOCOL_IPSEC, 239 .ipsec = { 240 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, 241 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, 242 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, 243 .options = { 244 .udp_encap = 1, 245 .stats = 1, 246 .esn = 1 247 } 248 }, 249 .crypto_capabilities = nfp_crypto_caps, 250 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA 251 }, 252 { /* IPsec Inline Crypto Tunnel Ingress */ 253 .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, 254 .protocol = RTE_SECURITY_PROTOCOL_IPSEC, 255 .ipsec = { 256 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, 257 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, 258 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, 259 .options = { 260 .udp_encap = 1, 261 .stats = 1, 262 .esn = 1 263 } 264 }, 265 .crypto_capabilities = nfp_crypto_caps 266 }, 267 { /* IPsec Inline Crypto Transport Egress */ 268 .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, 269 .protocol = RTE_SECURITY_PROTOCOL_IPSEC, 270 .ipsec = { 271 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, 272 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, 273 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, 274 .options = { 275 .udp_encap = 1, 276 .stats = 1, 277 .esn = 1 278 } 279 }, 280 .crypto_capabilities = nfp_crypto_caps, 281 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA 282 }, 283 { /* IPsec Inline Crypto Transport Ingress */ 284 .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, 285 .protocol = RTE_SECURITY_PROTOCOL_IPSEC, 286 .ipsec = { 287 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, 288 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, 289 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, 290 .options = { 291 .udp_encap = 1, 292 .stats = 1, 293 .esn = 1 294 } 295 }, 296 .crypto_capabilities = nfp_crypto_caps 297 }, 298 { /* IPsec Inline Protocol Tunnel Egress */ 299 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, 300 .protocol = RTE_SECURITY_PROTOCOL_IPSEC, 301 .ipsec = { 302 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, 303 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, 304 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, 305 .options = { 306 .udp_encap = 1, 307 .stats = 1, 308 .esn = 1 309 } 310 }, 311 .crypto_capabilities = nfp_crypto_caps, 312 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA 313 }, 314 { /* IPsec Inline Protocol Tunnel Ingress */ 315 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, 316 .protocol = RTE_SECURITY_PROTOCOL_IPSEC, 317 .ipsec = { 318 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, 319 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, 320 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, 321 .options = { 322 .udp_encap = 1, 323 .stats = 1, 324 .esn = 1 325 } 326 }, 327 .crypto_capabilities = nfp_crypto_caps 328 }, 329 { /* IPsec Inline Protocol Transport Egress */ 330 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, 331 .protocol = RTE_SECURITY_PROTOCOL_IPSEC, 332 .ipsec = { 333 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, 334 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, 335 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, 336 .options = { 337 .udp_encap = 1, 338 .stats = 1, 339 .esn = 1 340 } 341 }, 342 .crypto_capabilities = nfp_crypto_caps, 343 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA 344 }, 345 { /* IPsec Inline Protocol Transport Ingress */ 346 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, 347 .protocol = RTE_SECURITY_PROTOCOL_IPSEC, 348 .ipsec = { 349 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, 350 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, 351 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, 352 .options = { 353 .udp_encap = 1, 354 .stats = 1, 355 .esn = 1 356 } 357 }, 358 .crypto_capabilities = nfp_crypto_caps 359 }, 360 { 361 .action = RTE_SECURITY_ACTION_TYPE_NONE 362 } 363 }; 364 365 /* IPsec config message cmd codes */ 366 enum nfp_ipsec_cfg_msg_cmd_codes { 367 NFP_IPSEC_CFG_MSG_ADD_SA, /**< Add a new SA */ 368 NFP_IPSEC_CFG_MSG_INV_SA, /**< Invalidate an existing SA */ 369 NFP_IPSEC_CFG_MSG_MODIFY_SA, /**< Modify an existing SA */ 370 NFP_IPSEC_CFG_MSG_GET_SA_STATS, /**< Report SA counters, flags, etc. */ 371 NFP_IPSEC_CFG_MSG_GET_SEQ_NUMS, /**< Allocate sequence numbers */ 372 NFP_IPSEC_CFG_MSG_LAST 373 }; 374 375 enum nfp_ipsec_cfg_msg_rsp_codes { 376 NFP_IPSEC_CFG_MSG_OK, 377 NFP_IPSEC_CFG_MSG_FAILED, 378 NFP_IPSEC_CFG_MSG_SA_VALID, 379 NFP_IPSEC_CFG_MSG_SA_HASH_ADD_FAILED, 380 NFP_IPSEC_CFG_MSG_SA_HASH_DEL_FAILED, 381 NFP_IPSEC_CFG_MSG_SA_INVALID_CMD 382 }; 383 384 enum nfp_ipsec_mode { 385 NFP_IPSEC_MODE_TRANSPORT, 386 NFP_IPSEC_MODE_TUNNEL, 387 }; 388 389 enum nfp_ipsec_protocol { 390 NFP_IPSEC_PROTOCOL_AH, 391 NFP_IPSEC_PROTOCOL_ESP, 392 }; 393 394 /* Cipher modes */ 395 enum nfp_ipsec_cimode { 396 NFP_IPSEC_CIMODE_ECB, 397 NFP_IPSEC_CIMODE_CBC, 398 NFP_IPSEC_CIMODE_CFB, 399 NFP_IPSEC_CIMODE_OFB, 400 NFP_IPSEC_CIMODE_CTR, 401 }; 402 403 /* Hash types */ 404 enum nfp_ipsec_hash_type { 405 NFP_IPSEC_HASH_NONE, 406 NFP_IPSEC_HASH_MD5_96, 407 NFP_IPSEC_HASH_SHA1_96, 408 NFP_IPSEC_HASH_SHA256_96, 409 NFP_IPSEC_HASH_SHA384_96, 410 NFP_IPSEC_HASH_SHA512_96, 411 NFP_IPSEC_HASH_MD5_128, 412 NFP_IPSEC_HASH_SHA1_80, 413 NFP_IPSEC_HASH_SHA256_128, 414 NFP_IPSEC_HASH_SHA384_192, 415 NFP_IPSEC_HASH_SHA512_256, 416 NFP_IPSEC_HASH_GF128_128, 417 NFP_IPSEC_HASH_POLY1305_128, 418 }; 419 420 /* Cipher types */ 421 enum nfp_ipsec_cipher_type { 422 NFP_IPSEC_CIPHER_NULL, 423 NFP_IPSEC_CIPHER_3DES, 424 NFP_IPSEC_CIPHER_AES128, 425 NFP_IPSEC_CIPHER_AES192, 426 NFP_IPSEC_CIPHER_AES256, 427 NFP_IPSEC_CIPHER_AES128_NULL, 428 NFP_IPSEC_CIPHER_AES192_NULL, 429 NFP_IPSEC_CIPHER_AES256_NULL, 430 NFP_IPSEC_CIPHER_CHACHA20, 431 }; 432 433 /* Don't Fragment types */ 434 enum nfp_ipsec_df_type { 435 NFP_IPSEC_DF_CLEAR, 436 NFP_IPSEC_DF_SET, 437 NFP_IPSEC_DF_COPY, 438 }; 439 440 static int 441 nfp_ipsec_cfg_cmd_issue(struct nfp_net_hw *net_hw, 442 struct nfp_ipsec_msg *msg) 443 { 444 int ret; 445 uint32_t i; 446 uint32_t msg_size; 447 448 msg_size = RTE_DIM(msg->raw); 449 msg->rsp = NFP_IPSEC_CFG_MSG_OK; 450 451 for (i = 0; i < msg_size; i++) 452 nn_cfg_writel(&net_hw->super, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]); 453 454 ret = nfp_net_mbox_reconfig(net_hw, NFP_NET_CFG_MBOX_CMD_IPSEC); 455 if (ret < 0) { 456 PMD_DRV_LOG(ERR, "Failed to IPsec reconfig mbox."); 457 return ret; 458 } 459 460 /* 461 * Not all commands and callers make use of response message data. But 462 * leave this up to the caller and always read and store the full 463 * response. One example where the data is needed is for statistics. 464 */ 465 for (i = 0; i < msg_size; i++) 466 msg->raw[i] = nn_cfg_readl(&net_hw->super, NFP_NET_CFG_MBOX_VAL + 4 * i); 467 468 switch (msg->rsp) { 469 case NFP_IPSEC_CFG_MSG_OK: 470 ret = 0; 471 break; 472 case NFP_IPSEC_CFG_MSG_SA_INVALID_CMD: 473 ret = -EINVAL; 474 break; 475 case NFP_IPSEC_CFG_MSG_SA_VALID: 476 ret = -EEXIST; 477 break; 478 case NFP_IPSEC_CFG_MSG_FAILED: 479 /* FALLTHROUGH */ 480 case NFP_IPSEC_CFG_MSG_SA_HASH_ADD_FAILED: 481 /* FALLTHROUGH */ 482 case NFP_IPSEC_CFG_MSG_SA_HASH_DEL_FAILED: 483 ret = -EIO; 484 break; 485 default: 486 ret = -EDOM; 487 } 488 489 return ret; 490 } 491 492 /** 493 * Get valid SA index from SA table 494 * 495 * @param data 496 * SA table pointer 497 * @param sa_idx 498 * SA table index pointer 499 * 500 * @return 501 * Negative number on full or repeat, 0 on success 502 * 503 * Note: multiple sockets may create same SA session. 504 */ 505 static void 506 nfp_get_sa_entry(struct nfp_net_ipsec_data *data, 507 int *sa_idx) 508 { 509 uint32_t i; 510 511 for (i = 0; i < NFP_NET_IPSEC_MAX_SA_CNT; i++) { 512 if (data->sa_entries[i] == NULL) { 513 *sa_idx = i; 514 break; 515 } 516 } 517 } 518 519 static void 520 nfp_aesgcm_iv_update(struct ipsec_add_sa *cfg, 521 uint16_t iv_len, 522 const char *iv_string) 523 { 524 int i; 525 char *save; 526 char *iv_b; 527 char *iv_str; 528 const rte_be32_t *iv_value; 529 uint8_t cfg_iv[NFP_ESP_IV_LENGTH] = {}; 530 531 iv_str = strdup(iv_string); 532 if (iv_str == NULL) { 533 PMD_DRV_LOG(ERR, "Failed to strdup iv_string."); 534 return; 535 } 536 537 for (i = 0; i < iv_len; i++) { 538 iv_b = strtok_r(i ? NULL : iv_str, ",", &save); 539 if (iv_b == NULL) 540 break; 541 542 cfg_iv[i] = strtoul(iv_b, NULL, 0); 543 } 544 545 iv_value = (const rte_be32_t *)(cfg_iv); 546 cfg->aesgcm_fields.iv[0] = rte_be_to_cpu_32(iv_value[0]); 547 cfg->aesgcm_fields.iv[1] = rte_be_to_cpu_32(iv_value[1]); 548 549 free(iv_str); 550 } 551 552 static int 553 set_aes_keylen(uint32_t key_length, 554 struct ipsec_add_sa *cfg) 555 { 556 switch (key_length << 3) { 557 case 128: 558 cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_AES128; 559 break; 560 case 192: 561 cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_AES192; 562 break; 563 case 256: 564 cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_AES256; 565 break; 566 default: 567 PMD_DRV_LOG(ERR, "AES cipher key length is illegal!"); 568 return -EINVAL; 569 } 570 571 return 0; 572 } 573 574 /* Map rte_security_session_conf aead algo to NFP aead algo */ 575 static int 576 nfp_aead_map(struct rte_eth_dev *eth_dev, 577 struct rte_crypto_aead_xform *aead, 578 uint32_t key_length, 579 struct ipsec_add_sa *cfg) 580 { 581 int ret; 582 uint32_t i; 583 uint32_t index; 584 uint16_t iv_len; 585 uint32_t offset; 586 uint32_t device_id; 587 const char *iv_str; 588 const rte_be32_t *key; 589 struct nfp_net_hw *net_hw; 590 591 net_hw = eth_dev->data->dev_private; 592 device_id = net_hw->device_id; 593 offset = 0; 594 595 switch (aead->algo) { 596 case RTE_CRYPTO_AEAD_AES_GCM: 597 if (aead->digest_length != 16) { 598 PMD_DRV_LOG(ERR, "ICV must be 128bit with RTE_CRYPTO_AEAD_AES_GCM!"); 599 return -EINVAL; 600 } 601 602 cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CTR; 603 cfg->ctrl_word.hash = NFP_IPSEC_HASH_GF128_128; 604 605 ret = set_aes_keylen(key_length, cfg); 606 if (ret < 0) { 607 PMD_DRV_LOG(ERR, "Failed to set AES_GCM key length!"); 608 return -EINVAL; 609 } 610 611 break; 612 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305: 613 if (device_id != PCI_DEVICE_ID_NFP3800_PF_NIC) { 614 PMD_DRV_LOG(ERR, "Unsupported aead CHACHA20_POLY1305 algorithm!"); 615 return -EINVAL; 616 } 617 618 if (aead->digest_length != 16) { 619 PMD_DRV_LOG(ERR, "ICV must be 128bit with RTE_CRYPTO_AEAD_CHACHA20_POLY1305."); 620 return -EINVAL; 621 } 622 623 /* Aead->alg_key_len includes 32-bit salt */ 624 if (key_length != 32) { 625 PMD_DRV_LOG(ERR, "Unsupported CHACHA20 key length."); 626 return -EINVAL; 627 } 628 629 /* The CHACHA20's mode is not configured */ 630 cfg->ctrl_word.hash = NFP_IPSEC_HASH_POLY1305_128; 631 cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_CHACHA20; 632 break; 633 default: 634 PMD_DRV_LOG(ERR, "Unsupported aead algorithm!"); 635 return -EINVAL; 636 } 637 638 key = (const rte_be32_t *)(aead->key.data); 639 640 /* 641 * The CHACHA20's key order needs to be adjusted based on hardware design. 642 * Unadjusted order: {K0, K1, K2, K3, K4, K5, K6, K7} 643 * Adjusted order: {K4, K5, K6, K7, K0, K1, K2, K3} 644 */ 645 if (aead->algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) 646 offset = key_length / sizeof(cfg->cipher_key[0]) << 1; 647 648 for (i = 0; i < key_length / sizeof(cfg->cipher_key[0]); i++) { 649 index = (i + offset) % (key_length / sizeof(cfg->cipher_key[0])); 650 cfg->cipher_key[index] = rte_be_to_cpu_32(key[i]); 651 } 652 653 /* 654 * The iv of the FW is equal to ESN by default. Only the 655 * aead algorithm can offload the iv of configuration and 656 * the length of iv cannot be greater than NFP_ESP_IV_LENGTH. 657 */ 658 iv_str = getenv("ETH_SEC_IV_OVR"); 659 if (iv_str != NULL) { 660 iv_len = aead->iv.length; 661 if (iv_len > NFP_ESP_IV_LENGTH) { 662 PMD_DRV_LOG(ERR, "Unsupported length of iv data."); 663 return -EINVAL; 664 } 665 666 nfp_aesgcm_iv_update(cfg, iv_len, iv_str); 667 } 668 669 return 0; 670 } 671 672 /* Map rte_security_session_conf cipher algo to NFP cipher algo */ 673 static int 674 nfp_cipher_map(struct rte_eth_dev *eth_dev, 675 struct rte_crypto_cipher_xform *cipher, 676 uint32_t key_length, 677 struct ipsec_add_sa *cfg) 678 { 679 int ret; 680 uint32_t i; 681 uint32_t device_id; 682 const rte_be32_t *key; 683 struct nfp_net_hw *net_hw; 684 685 net_hw = eth_dev->data->dev_private; 686 device_id = net_hw->device_id; 687 688 switch (cipher->algo) { 689 case RTE_CRYPTO_CIPHER_NULL: 690 cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC; 691 cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_NULL; 692 break; 693 case RTE_CRYPTO_CIPHER_3DES_CBC: 694 if (device_id == PCI_DEVICE_ID_NFP3800_PF_NIC) { 695 PMD_DRV_LOG(ERR, "Unsupported 3DESCBC encryption algorithm!"); 696 return -EINVAL; 697 } 698 699 cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC; 700 cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_3DES; 701 break; 702 case RTE_CRYPTO_CIPHER_AES_CBC: 703 cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC; 704 ret = set_aes_keylen(key_length, cfg); 705 if (ret < 0) { 706 PMD_DRV_LOG(ERR, "Failed to set cipher key length!"); 707 return -EINVAL; 708 } 709 710 break; 711 default: 712 PMD_DRV_LOG(ERR, "Unsupported cipher alg!"); 713 return -EINVAL; 714 } 715 716 key = (const rte_be32_t *)(cipher->key.data); 717 if (key_length > sizeof(cfg->cipher_key)) { 718 PMD_DRV_LOG(ERR, "Insufficient space for offloaded key."); 719 return -EINVAL; 720 } 721 722 for (i = 0; i < key_length / sizeof(cfg->cipher_key[0]); i++) 723 cfg->cipher_key[i] = rte_be_to_cpu_32(key[i]); 724 725 return 0; 726 } 727 728 static void 729 set_md5hmac(struct ipsec_add_sa *cfg, 730 uint32_t *digest_length) 731 { 732 switch (*digest_length) { 733 case 96: 734 cfg->ctrl_word.hash = NFP_IPSEC_HASH_MD5_96; 735 break; 736 case 128: 737 cfg->ctrl_word.hash = NFP_IPSEC_HASH_MD5_128; 738 break; 739 default: 740 *digest_length = 0; 741 } 742 } 743 744 static void 745 set_sha1hmac(struct ipsec_add_sa *cfg, 746 uint32_t *digest_length) 747 { 748 switch (*digest_length) { 749 case 96: 750 cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA1_96; 751 break; 752 case 80: 753 cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA1_80; 754 break; 755 default: 756 *digest_length = 0; 757 } 758 } 759 760 static void 761 set_sha2_256hmac(struct ipsec_add_sa *cfg, 762 uint32_t *digest_length) 763 { 764 switch (*digest_length) { 765 case 96: 766 cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA256_96; 767 break; 768 case 128: 769 cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA256_128; 770 break; 771 default: 772 *digest_length = 0; 773 } 774 } 775 776 static void 777 set_sha2_384hmac(struct ipsec_add_sa *cfg, 778 uint32_t *digest_length) 779 { 780 switch (*digest_length) { 781 case 96: 782 cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA384_96; 783 break; 784 case 192: 785 cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA384_192; 786 break; 787 default: 788 *digest_length = 0; 789 } 790 } 791 792 static void 793 set_sha2_512hmac(struct ipsec_add_sa *cfg, 794 uint32_t *digest_length) 795 { 796 switch (*digest_length) { 797 case 96: 798 cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA512_96; 799 break; 800 case 256: 801 cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA512_256; 802 break; 803 default: 804 *digest_length = 0; 805 } 806 } 807 808 /* Map rte_security_session_conf auth algo to NFP auth algo */ 809 static int 810 nfp_auth_map(struct rte_eth_dev *eth_dev, 811 struct rte_crypto_auth_xform *auth, 812 uint32_t digest_length, 813 struct ipsec_add_sa *cfg) 814 { 815 uint32_t i; 816 uint8_t key_length; 817 uint32_t device_id; 818 const rte_be32_t *key; 819 struct nfp_net_hw *net_hw; 820 821 if (digest_length == 0) { 822 PMD_DRV_LOG(ERR, "Auth digest length is illegal!"); 823 return -EINVAL; 824 } 825 826 net_hw = eth_dev->data->dev_private; 827 device_id = net_hw->device_id; 828 digest_length = digest_length << 3; 829 830 switch (auth->algo) { 831 case RTE_CRYPTO_AUTH_NULL: 832 cfg->ctrl_word.hash = NFP_IPSEC_HASH_NONE; 833 digest_length = 1; 834 break; 835 case RTE_CRYPTO_AUTH_MD5_HMAC: 836 if (device_id == PCI_DEVICE_ID_NFP3800_PF_NIC) { 837 PMD_DRV_LOG(ERR, "Unsupported MD5HMAC authentication algorithm!"); 838 return -EINVAL; 839 } 840 841 set_md5hmac(cfg, &digest_length); 842 break; 843 case RTE_CRYPTO_AUTH_SHA1_HMAC: 844 set_sha1hmac(cfg, &digest_length); 845 break; 846 case RTE_CRYPTO_AUTH_SHA256_HMAC: 847 set_sha2_256hmac(cfg, &digest_length); 848 break; 849 case RTE_CRYPTO_AUTH_SHA384_HMAC: 850 set_sha2_384hmac(cfg, &digest_length); 851 break; 852 case RTE_CRYPTO_AUTH_SHA512_HMAC: 853 set_sha2_512hmac(cfg, &digest_length); 854 break; 855 default: 856 PMD_DRV_LOG(ERR, "Unsupported auth alg!"); 857 return -EINVAL; 858 } 859 860 if (digest_length == 0) { 861 PMD_DRV_LOG(ERR, "Unsupported authentication algorithm digest length."); 862 return -EINVAL; 863 } 864 865 key = (const rte_be32_t *)(auth->key.data); 866 key_length = auth->key.length; 867 if (key_length > sizeof(cfg->auth_key)) { 868 PMD_DRV_LOG(ERR, "Insufficient space for offloaded auth key!"); 869 return -EINVAL; 870 } 871 872 for (i = 0; i < key_length / sizeof(cfg->auth_key[0]); i++) 873 cfg->auth_key[i] = rte_be_to_cpu_32(key[i]); 874 875 return 0; 876 } 877 878 static int 879 nfp_crypto_msg_build(struct rte_eth_dev *eth_dev, 880 struct rte_security_session_conf *conf, 881 struct nfp_ipsec_msg *msg) 882 { 883 int ret; 884 struct ipsec_add_sa *cfg; 885 struct rte_crypto_sym_xform *cur; 886 struct rte_crypto_sym_xform *next; 887 enum rte_security_ipsec_sa_direction direction; 888 889 cur = conf->crypto_xform; 890 if (cur == NULL) { 891 PMD_DRV_LOG(ERR, "Unsupported crypto_xform is NULL!"); 892 return -EINVAL; 893 } 894 895 next = cur->next; 896 direction = conf->ipsec.direction; 897 cfg = &msg->cfg_add_sa; 898 899 switch (cur->type) { 900 case RTE_CRYPTO_SYM_XFORM_AEAD: 901 /* Aead transforms can be used for either inbound/outbound IPsec SAs */ 902 if (next != NULL) { 903 PMD_DRV_LOG(ERR, "Next crypto_xform type should be NULL!"); 904 return -EINVAL; 905 } 906 907 ret = nfp_aead_map(eth_dev, &cur->aead, cur->aead.key.length, cfg); 908 if (ret < 0) { 909 PMD_DRV_LOG(ERR, "Failed to map aead alg!"); 910 return ret; 911 } 912 913 cfg->aesgcm_fields.salt = conf->ipsec.salt; 914 break; 915 case RTE_CRYPTO_SYM_XFORM_AUTH: 916 /* Only support Auth + Cipher for inbound */ 917 if (direction != RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 918 PMD_DRV_LOG(ERR, "Direction should be INGRESS, but it is not!"); 919 return -EINVAL; 920 } 921 922 if (next == NULL || next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) { 923 PMD_DRV_LOG(ERR, "Next crypto_xfrm should be cipher, but it is not!"); 924 return -EINVAL; 925 } 926 927 ret = nfp_auth_map(eth_dev, &cur->auth, cur->auth.digest_length, cfg); 928 if (ret < 0) { 929 PMD_DRV_LOG(ERR, "Failed to map auth alg!"); 930 return ret; 931 } 932 933 ret = nfp_cipher_map(eth_dev, &next->cipher, next->cipher.key.length, cfg); 934 if (ret < 0) { 935 PMD_DRV_LOG(ERR, "Failed to map cipher alg!"); 936 return ret; 937 } 938 939 break; 940 case RTE_CRYPTO_SYM_XFORM_CIPHER: 941 /* Only support Cipher + Auth for outbound */ 942 if (direction != RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 943 PMD_DRV_LOG(ERR, "Direction should be EGRESS, but it is not!"); 944 return -EINVAL; 945 } 946 947 if (next == NULL || next->type != RTE_CRYPTO_SYM_XFORM_AUTH) { 948 PMD_DRV_LOG(ERR, "Next crypto_xfrm should be auth, but it is not!"); 949 return -EINVAL; 950 } 951 952 ret = nfp_cipher_map(eth_dev, &cur->cipher, cur->cipher.key.length, cfg); 953 if (ret < 0) { 954 PMD_DRV_LOG(ERR, "Failed to map cipher alg!"); 955 return ret; 956 } 957 958 ret = nfp_auth_map(eth_dev, &next->auth, next->auth.digest_length, cfg); 959 if (ret < 0) { 960 PMD_DRV_LOG(ERR, "Failed to map auth alg!"); 961 return ret; 962 } 963 964 break; 965 default: 966 PMD_DRV_LOG(ERR, "Unsupported crypto_xform type!"); 967 return -EINVAL; 968 } 969 970 return 0; 971 } 972 973 static int 974 nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev, 975 struct rte_security_session_conf *conf, 976 struct nfp_ipsec_msg *msg) 977 { 978 int i; 979 int ret; 980 rte_be32_t *src_ip; 981 rte_be32_t *dst_ip; 982 struct ipsec_add_sa *cfg; 983 enum rte_security_ipsec_tunnel_type type; 984 985 cfg = &msg->cfg_add_sa; 986 cfg->spi = conf->ipsec.spi; 987 cfg->pmtu_limit = 0xffff; 988 989 /* 990 * UDP encapsulation 991 * 992 * 1: Do UDP encapsulation/decapsulation 993 * 0: No UDP encapsulation 994 */ 995 if (conf->ipsec.options.udp_encap == 1) { 996 cfg->udp_enable = 1; 997 cfg->natt_dst_port = NFP_UDP_ESP_PORT; 998 cfg->natt_src_port = NFP_UDP_ESP_PORT; 999 } 1000 1001 if (conf->ipsec.options.copy_df == 1) 1002 cfg->df_ctrl = NFP_IPSEC_DF_COPY; 1003 else if (conf->ipsec.tunnel.ipv4.df != 0) 1004 cfg->df_ctrl = NFP_IPSEC_DF_SET; 1005 else 1006 cfg->df_ctrl = NFP_IPSEC_DF_CLEAR; 1007 1008 switch (conf->action_type) { 1009 case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO: 1010 cfg->ctrl_word.encap_dsbl = 1; 1011 break; 1012 case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: 1013 cfg->ctrl_word.encap_dsbl = 0; 1014 break; 1015 default: 1016 PMD_DRV_LOG(ERR, "Unsupported IPsec action for offload, action: %d.", 1017 conf->action_type); 1018 return -EINVAL; 1019 } 1020 1021 switch (conf->ipsec.proto) { 1022 case RTE_SECURITY_IPSEC_SA_PROTO_ESP: 1023 cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_ESP; 1024 break; 1025 case RTE_SECURITY_IPSEC_SA_PROTO_AH: 1026 cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_AH; 1027 break; 1028 default: 1029 PMD_DRV_LOG(ERR, "Unsupported IPsec protocol for offload, protocol: %d.", 1030 conf->ipsec.proto); 1031 return -EINVAL; 1032 } 1033 1034 switch (conf->ipsec.mode) { 1035 case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL: 1036 type = conf->ipsec.tunnel.type; 1037 cfg->ctrl_word.mode = NFP_IPSEC_MODE_TUNNEL; 1038 if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 1039 src_ip = (rte_be32_t *)&conf->ipsec.tunnel.ipv4.src_ip.s_addr; 1040 dst_ip = (rte_be32_t *)&conf->ipsec.tunnel.ipv4.dst_ip.s_addr; 1041 cfg->src_ip[0] = rte_be_to_cpu_32(src_ip[0]); 1042 cfg->dst_ip[0] = rte_be_to_cpu_32(dst_ip[0]); 1043 cfg->ipv6 = 0; 1044 } else if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 1045 src_ip = (rte_be32_t *)&conf->ipsec.tunnel.ipv6.src_addr; 1046 dst_ip = (rte_be32_t *)&conf->ipsec.tunnel.ipv6.dst_addr; 1047 for (i = 0; i < 4; i++) { 1048 cfg->src_ip[i] = rte_be_to_cpu_32(src_ip[i]); 1049 cfg->dst_ip[i] = rte_be_to_cpu_32(dst_ip[i]); 1050 } 1051 cfg->ipv6 = 1; 1052 } else { 1053 PMD_DRV_LOG(ERR, "Unsupported address family!"); 1054 return -EINVAL; 1055 } 1056 1057 break; 1058 case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT: 1059 cfg->ctrl_word.mode = NFP_IPSEC_MODE_TRANSPORT; 1060 memset(&cfg->src_ip, 0, sizeof(cfg->src_ip)); 1061 memset(&cfg->dst_ip, 0, sizeof(cfg->dst_ip)); 1062 1063 break; 1064 default: 1065 PMD_DRV_LOG(ERR, "Unsupported IPsec mode for offload, mode: %d.", 1066 conf->ipsec.mode); 1067 return -EINVAL; 1068 } 1069 1070 ret = nfp_crypto_msg_build(eth_dev, conf, msg); 1071 if (ret < 0) { 1072 PMD_DRV_LOG(ERR, "Failed to build auth/crypto/aead msg!"); 1073 return ret; 1074 } 1075 1076 return 0; 1077 } 1078 1079 static int 1080 nfp_crypto_create_session(void *device, 1081 struct rte_security_session_conf *conf, 1082 struct rte_security_session *session) 1083 { 1084 int ret; 1085 int sa_idx; 1086 struct nfp_net_hw *net_hw; 1087 struct nfp_ipsec_msg msg; 1088 struct rte_eth_dev *eth_dev; 1089 struct nfp_ipsec_session *priv_session; 1090 1091 /* Only support IPsec at present */ 1092 if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) { 1093 PMD_DRV_LOG(ERR, "Unsupported non-IPsec offload!"); 1094 return -EINVAL; 1095 } 1096 1097 sa_idx = -1; 1098 eth_dev = device; 1099 priv_session = SECURITY_GET_SESS_PRIV(session); 1100 net_hw = eth_dev->data->dev_private; 1101 1102 if (net_hw->ipsec_data->sa_free_cnt == 0) { 1103 PMD_DRV_LOG(ERR, "No space in SA table, spi: %d.", conf->ipsec.spi); 1104 return -EINVAL; 1105 } 1106 1107 nfp_get_sa_entry(net_hw->ipsec_data, &sa_idx); 1108 1109 if (sa_idx < 0) { 1110 PMD_DRV_LOG(ERR, "Failed to get SA entry!"); 1111 return -EINVAL; 1112 } 1113 1114 memset(&msg, 0, sizeof(msg)); 1115 ret = nfp_ipsec_msg_build(eth_dev, conf, &msg); 1116 if (ret < 0) { 1117 PMD_DRV_LOG(ERR, "Failed to build IPsec msg!"); 1118 return -EINVAL; 1119 } 1120 1121 msg.cmd = NFP_IPSEC_CFG_MSG_ADD_SA; 1122 msg.sa_idx = sa_idx; 1123 ret = nfp_ipsec_cfg_cmd_issue(net_hw, &msg); 1124 if (ret < 0) { 1125 PMD_DRV_LOG(ERR, "Failed to add SA to nic."); 1126 return -EINVAL; 1127 } 1128 1129 priv_session->action = conf->action_type; 1130 priv_session->ipsec = conf->ipsec; 1131 priv_session->msg = msg.cfg_add_sa; 1132 priv_session->sa_index = sa_idx; 1133 priv_session->dev = eth_dev; 1134 priv_session->user_data = conf->userdata; 1135 1136 net_hw->ipsec_data->sa_free_cnt--; 1137 net_hw->ipsec_data->sa_entries[sa_idx] = priv_session; 1138 1139 return 0; 1140 } 1141 1142 static int 1143 nfp_crypto_update_session(void *device __rte_unused, 1144 struct rte_security_session *session, 1145 struct rte_security_session_conf *conf) 1146 { 1147 struct nfp_ipsec_session *priv_session; 1148 1149 priv_session = SECURITY_GET_SESS_PRIV(session); 1150 if (priv_session == NULL) 1151 return -EINVAL; 1152 1153 /* Update IPsec ESN value */ 1154 if (priv_session->msg.ctrl_word.ext_seq != 0 && conf->ipsec.options.esn != 0) { 1155 /* 1156 * Store in nfp_ipsec_session for outbound SA for use 1157 * in nfp_security_set_pkt_metadata() function. 1158 */ 1159 priv_session->ipsec.esn.hi = conf->ipsec.esn.hi; 1160 priv_session->ipsec.esn.low = conf->ipsec.esn.low; 1161 } 1162 1163 return 0; 1164 } 1165 1166 static int 1167 nfp_security_set_pkt_metadata(void *device, 1168 struct rte_security_session *session, 1169 struct rte_mbuf *m, 1170 void *params) 1171 { 1172 int offset; 1173 uint64_t *sqn; 1174 struct nfp_net_hw *net_hw; 1175 struct rte_eth_dev *eth_dev; 1176 struct nfp_ipsec_session *priv_session; 1177 1178 sqn = params; 1179 eth_dev = device; 1180 priv_session = SECURITY_GET_SESS_PRIV(session); 1181 net_hw = eth_dev->data->dev_private; 1182 1183 if (priv_session->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 1184 struct nfp_tx_ipsec_desc_msg *desc_md; 1185 1186 offset = net_hw->ipsec_data->pkt_dynfield_offset; 1187 desc_md = RTE_MBUF_DYNFIELD(m, offset, struct nfp_tx_ipsec_desc_msg *); 1188 1189 if (priv_session->msg.ctrl_word.ext_seq != 0 && sqn != NULL) { 1190 desc_md->esn.low = (uint32_t)*sqn; 1191 desc_md->esn.hi = (uint32_t)(*sqn >> 32); 1192 } else if (priv_session->msg.ctrl_word.ext_seq != 0) { 1193 desc_md->esn.low = priv_session->ipsec.esn.low; 1194 desc_md->esn.hi = priv_session->ipsec.esn.hi; 1195 } else { 1196 desc_md->esn.low = priv_session->ipsec.esn.low; 1197 desc_md->esn.hi = 0; 1198 } 1199 1200 desc_md->enc = 1; 1201 desc_md->sa_idx = priv_session->sa_index; 1202 } 1203 1204 return 0; 1205 } 1206 1207 /** 1208 * Get discards packet statistics for each SA 1209 * 1210 * The sa_discard_stats contains the statistics of discards packets 1211 * of an SA. This function calculates the sum total of discarded packets. 1212 * 1213 * @param errors 1214 * The value is SA discards packet sum total 1215 * @param sa_discard_stats 1216 * The struct is SA discards packet Statistics 1217 */ 1218 static void 1219 nfp_get_errorstats(uint64_t *errors, 1220 struct ipsec_discard_stats *sa_discard_stats) 1221 { 1222 uint32_t i; 1223 uint32_t len; 1224 uint32_t *perror; 1225 1226 perror = &sa_discard_stats->discards_auth; 1227 len = sizeof(struct ipsec_discard_stats) / sizeof(uint32_t); 1228 1229 for (i = 0; i < len; i++) 1230 *errors += *perror++; 1231 1232 *errors -= sa_discard_stats->ipv4_id_counter; 1233 } 1234 1235 static int 1236 nfp_security_session_get_stats(void *device, 1237 struct rte_security_session *session, 1238 struct rte_security_stats *stats) 1239 { 1240 int ret; 1241 struct nfp_net_hw *net_hw; 1242 struct nfp_ipsec_msg msg; 1243 struct rte_eth_dev *eth_dev; 1244 struct ipsec_get_sa_stats *cfg_s; 1245 struct rte_security_ipsec_stats *ips_s; 1246 struct nfp_ipsec_session *priv_session; 1247 enum rte_security_ipsec_sa_direction direction; 1248 1249 eth_dev = device; 1250 priv_session = SECURITY_GET_SESS_PRIV(session); 1251 memset(&msg, 0, sizeof(msg)); 1252 msg.cmd = NFP_IPSEC_CFG_MSG_GET_SA_STATS; 1253 msg.sa_idx = priv_session->sa_index; 1254 net_hw = eth_dev->data->dev_private; 1255 1256 ret = nfp_ipsec_cfg_cmd_issue(net_hw, &msg); 1257 if (ret < 0) { 1258 PMD_DRV_LOG(ERR, "Failed to get SA stats."); 1259 return ret; 1260 } 1261 1262 cfg_s = &msg.cfg_get_stats; 1263 direction = priv_session->ipsec.direction; 1264 memset(stats, 0, sizeof(struct rte_security_stats)); /* Start with zeros */ 1265 stats->protocol = RTE_SECURITY_PROTOCOL_IPSEC; 1266 ips_s = &stats->ipsec; 1267 1268 /* Only display SA if any counters are non-zero */ 1269 if (cfg_s->lifetime_byte_count != 0 || cfg_s->pkt_count != 0) { 1270 if (direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 1271 ips_s->ipackets = cfg_s->pkt_count; 1272 ips_s->ibytes = cfg_s->lifetime_byte_count; 1273 nfp_get_errorstats(&ips_s->ierrors, &cfg_s->sa_discard_stats); 1274 } else { 1275 ips_s->opackets = cfg_s->pkt_count; 1276 ips_s->obytes = cfg_s->lifetime_byte_count; 1277 nfp_get_errorstats(&ips_s->oerrors, &cfg_s->sa_discard_stats); 1278 } 1279 } 1280 1281 return 0; 1282 } 1283 1284 static const struct rte_security_capability * 1285 nfp_crypto_capabilities_get(void *device __rte_unused) 1286 { 1287 return nfp_security_caps; 1288 } 1289 1290 static uint32_t 1291 nfp_security_session_get_size(void *device __rte_unused) 1292 { 1293 return sizeof(struct nfp_ipsec_session); 1294 } 1295 1296 static int 1297 nfp_crypto_remove_sa(struct rte_eth_dev *eth_dev, 1298 struct nfp_ipsec_session *priv_session) 1299 { 1300 int ret; 1301 uint32_t sa_index; 1302 struct nfp_net_hw *net_hw; 1303 struct nfp_ipsec_msg cfg; 1304 1305 sa_index = priv_session->sa_index; 1306 net_hw = eth_dev->data->dev_private; 1307 1308 cfg.cmd = NFP_IPSEC_CFG_MSG_INV_SA; 1309 cfg.sa_idx = sa_index; 1310 ret = nfp_ipsec_cfg_cmd_issue(net_hw, &cfg); 1311 if (ret < 0) { 1312 PMD_DRV_LOG(ERR, "Failed to remove SA!"); 1313 return -EINVAL; 1314 } 1315 1316 net_hw->ipsec_data->sa_free_cnt++; 1317 net_hw->ipsec_data->sa_entries[sa_index] = NULL; 1318 1319 return 0; 1320 } 1321 1322 static int 1323 nfp_crypto_remove_session(void *device, 1324 struct rte_security_session *session) 1325 { 1326 int ret; 1327 struct rte_eth_dev *eth_dev; 1328 struct nfp_ipsec_session *priv_session; 1329 1330 eth_dev = device; 1331 priv_session = SECURITY_GET_SESS_PRIV(session); 1332 if (eth_dev != priv_session->dev) { 1333 PMD_DRV_LOG(ERR, "Session not bound to this device."); 1334 return -ENODEV; 1335 } 1336 1337 ret = nfp_crypto_remove_sa(eth_dev, priv_session); 1338 if (ret < 0) { 1339 PMD_DRV_LOG(ERR, "Failed to remove session."); 1340 return -EFAULT; 1341 } 1342 1343 memset(priv_session, 0, sizeof(struct nfp_ipsec_session)); 1344 1345 return 0; 1346 } 1347 1348 static const struct rte_security_ops nfp_security_ops = { 1349 .session_create = nfp_crypto_create_session, 1350 .session_update = nfp_crypto_update_session, 1351 .session_get_size = nfp_security_session_get_size, 1352 .session_stats_get = nfp_security_session_get_stats, 1353 .session_destroy = nfp_crypto_remove_session, 1354 .set_pkt_metadata = nfp_security_set_pkt_metadata, 1355 .capabilities_get = nfp_crypto_capabilities_get, 1356 }; 1357 1358 static int 1359 nfp_ipsec_ctx_create(struct rte_eth_dev *dev, 1360 struct nfp_net_ipsec_data *data) 1361 { 1362 struct rte_security_ctx *ctx; 1363 static const struct rte_mbuf_dynfield pkt_md_dynfield = { 1364 .name = "nfp_ipsec_crypto_pkt_metadata", 1365 .size = sizeof(struct nfp_tx_ipsec_desc_msg), 1366 .align = alignof(struct nfp_tx_ipsec_desc_msg), 1367 }; 1368 1369 ctx = rte_zmalloc("security_ctx", 1370 sizeof(struct rte_security_ctx), 0); 1371 if (ctx == NULL) { 1372 PMD_INIT_LOG(ERR, "Failed to malloc security_ctx."); 1373 return -ENOMEM; 1374 } 1375 1376 ctx->device = dev; 1377 ctx->ops = &nfp_security_ops; 1378 ctx->sess_cnt = 0; 1379 dev->security_ctx = ctx; 1380 1381 data->pkt_dynfield_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield); 1382 if (data->pkt_dynfield_offset < 0) { 1383 PMD_INIT_LOG(ERR, "Failed to register mbuf esn_dynfield."); 1384 return -ENOMEM; 1385 } 1386 1387 return 0; 1388 } 1389 1390 int 1391 nfp_ipsec_init(struct rte_eth_dev *dev) 1392 { 1393 int ret; 1394 uint32_t cap_extend; 1395 struct nfp_net_hw *net_hw; 1396 struct nfp_net_ipsec_data *data; 1397 1398 net_hw = dev->data->dev_private; 1399 1400 cap_extend = net_hw->super.cap_ext; 1401 if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) == 0) { 1402 PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability."); 1403 return 0; 1404 } 1405 1406 data = rte_zmalloc("ipsec_data", sizeof(struct nfp_net_ipsec_data), 0); 1407 if (data == NULL) { 1408 PMD_INIT_LOG(ERR, "Failed to malloc ipsec_data."); 1409 return -ENOMEM; 1410 } 1411 1412 data->pkt_dynfield_offset = -1; 1413 data->sa_free_cnt = NFP_NET_IPSEC_MAX_SA_CNT; 1414 net_hw->ipsec_data = data; 1415 1416 ret = nfp_ipsec_ctx_create(dev, data); 1417 if (ret != 0) { 1418 PMD_INIT_LOG(ERR, "Failed to create IPsec ctx."); 1419 goto ipsec_cleanup; 1420 } 1421 1422 return 0; 1423 1424 ipsec_cleanup: 1425 nfp_ipsec_uninit(dev); 1426 1427 return ret; 1428 } 1429 1430 static void 1431 nfp_ipsec_ctx_destroy(struct rte_eth_dev *dev) 1432 { 1433 rte_free(dev->security_ctx); 1434 } 1435 1436 void 1437 nfp_ipsec_uninit(struct rte_eth_dev *dev) 1438 { 1439 uint16_t i; 1440 uint32_t cap_extend; 1441 struct nfp_net_hw *net_hw; 1442 struct nfp_ipsec_session *priv_session; 1443 1444 net_hw = dev->data->dev_private; 1445 1446 cap_extend = net_hw->super.cap_ext; 1447 if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) == 0) { 1448 PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability."); 1449 return; 1450 } 1451 1452 nfp_ipsec_ctx_destroy(dev); 1453 1454 if (net_hw->ipsec_data == NULL) { 1455 PMD_INIT_LOG(INFO, "IPsec data is NULL!"); 1456 return; 1457 } 1458 1459 for (i = 0; i < NFP_NET_IPSEC_MAX_SA_CNT; i++) { 1460 priv_session = net_hw->ipsec_data->sa_entries[i]; 1461 if (priv_session != NULL) 1462 memset(priv_session, 0, sizeof(struct nfp_ipsec_session)); 1463 } 1464 1465 rte_free(net_hw->ipsec_data); 1466 } 1467 1468