/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_security.h>
#include <rte_security_driver.h>

#include <cn9k_ethdev.h>
#include <cnxk_security.h>

static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
	{ /* NULL (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_NULL,
				.block_size = 1,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.iv_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				}
			}, },
		}, }
	},
	{ /* DES */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_DES_CBC,
				.block_size = 8,
				.key_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				},
				.iv_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				}
			}, },
		}, }
	},
	{ /* 3DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
				.block_size = 8,
				.key_size = {
					.min = 24,
					.max = 24,
					.increment = 0
				},
				.iv_size = {
					.min = 8,
					.max = 16,
					.increment = 8
				}
			}, },
		}, }
	},
	{ /* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* AES CCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_CCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 11,
					.max = 13,
					.increment = 1
				}
			}, }
		}, }
	},
	{ /* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 12,
					.max = 16,
					.increment = 4
				}
			}, }
		}, }
	},
	{ /* AES-XCBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0,
				},
			}, }
		}, }
	},
	{ /* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* MD5 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
			}, }
		}, }
	},
	{ /* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 20,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
			}, }
		}, }
	},
	{ /* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 1024,
					.increment = 1
				},
				.digest_size = {
					.min = 16,
					.max = 32,
					.increment = 16
				},
			}, }
		}, }
	},
	{ /* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 1024,
					.increment = 1
				},
				.digest_size = {
					.min = 24,
					.max = 48,
					.increment = 24
				},
			}, }
		}, }
	},
	{ /* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 1,
					.max = 1024,
					.increment = 1
				},
				.digest_size = {
					.min = 32,
					.max = 64,
					.increment = 32
				},
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
	{ /* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.replay_win_sz_max = CNXK_ON_AR_WIN_SIZE_MAX,
			.options = {
				.udp_encap = 1,
				.esn = 1
			}
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{ /* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = {
				.udp_encap = 1,
				.iv_gen_disable = 1,
				.esn = 1
			}
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{ /* IPsec Inline Protocol ESP Transport Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.replay_win_sz_max = CNXK_ON_AR_WIN_SIZE_MAX,
			.options = {
				.udp_encap = 1,
				.esn = 1,
			},
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{ /* IPsec Inline Protocol ESP Transport Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.replay_win_sz_max = CNXK_ON_AR_WIN_SIZE_MAX,
			.options = {
				.iv_gen_disable = 1,
				.udp_encap = 1,
				.esn = 1,
			},
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};
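
/* Illustrative note (not part of the driver): applications reach the
 * tables above through the generic rte_security API rather than directly.
 * A minimal sketch, assuming a port with inline IPsec support (the exact
 * API shape can vary between DPDK releases):
 *
 *	void *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
 *	const struct rte_security_capability *caps =
 *		rte_security_capabilities_get(sec_ctx);
 *
 * cn9k_eth_sec_capabilities_get() at the end of this file is the driver
 * hook that backs that call.
 */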

static inline int
ar_window_init(struct cn9k_inb_priv_data *inb_priv)
{
	if (inb_priv->replay_win_sz > CNXK_ON_AR_WIN_SIZE_MAX) {
		plt_err("Replay window size:%u is not supported",
			inb_priv->replay_win_sz);
		return -ENOTSUP;
	}

	rte_spinlock_init(&inb_priv->ar.lock);
	/*
	 * Set window bottom to 1, base and top to size of
	 * window
	 */
	inb_priv->ar.winb = 1;
	inb_priv->ar.wint = inb_priv->replay_win_sz;
	inb_priv->ar.base = inb_priv->replay_win_sz;

	return 0;
}

static void
outb_dbg_iv_update(struct roc_ie_on_common_sa *common_sa, const char *__iv_str)
{
	uint8_t *iv_dbg = common_sa->iv.aes_iv;
	char *iv_str = strdup(__iv_str);
	char *iv_b = NULL;
	char *save;
	int i, iv_len = ROC_IE_ON_MAX_IV_LEN;

	if (!iv_str)
		return;

	if (common_sa->ctl.enc_type == ROC_IE_SA_ENC_AES_GCM ||
	    common_sa->ctl.enc_type == ROC_IE_SA_ENC_AES_CTR ||
	    common_sa->ctl.enc_type == ROC_IE_SA_ENC_AES_CCM ||
	    common_sa->ctl.auth_type == ROC_IE_SA_AUTH_AES_GMAC) {
		iv_dbg = common_sa->iv.gcm.iv;
		iv_len = 8;
	}

	memset(iv_dbg, 0, iv_len);
	for (i = 0; i < iv_len; i++) {
		iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
		if (!iv_b)
			break;
		iv_dbg[i] = strtoul(iv_b, NULL, 0);
	}

	free(iv_str);
}
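
/* Debug aid only: when IV generation is disabled, the IV programmed into
 * the SA can be overridden through the ETH_SEC_IV_OVR environment
 * variable. The value is a comma separated list of bytes parsed by
 * outb_dbg_iv_update() above with strtoul(..., 0), so decimal and
 * 0x-prefixed hex both work, e.g. (illustrative value only):
 *
 *	ETH_SEC_IV_OVR="0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88"
 */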

static int
cn9k_eth_sec_session_update(void *device,
			    struct rte_security_session *sess,
			    struct rte_security_session_conf *conf)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn9k_outb_priv_data *outb_priv;
	struct cnxk_ipsec_outb_rlens *rlens;
	struct cn9k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	struct roc_ie_on_outb_sa *outb_sa;
	rte_spinlock_t *lock;
	char tbuf[128] = {0};
	const char *iv_str;
	uint32_t sa_idx;
	int ctx_len;
	int rc = 0;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		return -ENOTSUP;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	eth_sec->spi = conf->ipsec.spi;

	lock = &dev->outb.lock;
	rte_spinlock_lock(lock);

	outb_sa = eth_sec->sa;
	outb_priv = roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(outb_sa);
	sa_idx = outb_priv->sa_idx;

	/* Disable SA */
	outb_sa->common_sa.ctl.valid = 0;

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	sess_priv.u64 = 0;
	memset(outb_sa, 0, sizeof(struct roc_ie_on_outb_sa));

	/* Fill outbound sa params */
	rc = cnxk_on_ipsec_outb_sa_create(ipsec, crypto, outb_sa);
	if (rc < 0) {
		snprintf(tbuf, sizeof(tbuf),
			 "Failed to init outbound sa, rc=%d", rc);
		rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
		goto exit;
	}

	ctx_len = rc;
	rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa, outb_sa, false,
				   ctx_len);
	if (rc) {
		snprintf(tbuf, sizeof(tbuf),
			 "Failed to init outbound sa, rc=%d", rc);
		rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
		goto exit;
	}

	/* When IV is provided by the application,
	 * copy the IV to context and enable explicit IV flag in context.
	 */
	if (ipsec->options.iv_gen_disable == 1) {
		outb_sa->common_sa.ctl.explicit_iv_en = 1;
		iv_str = getenv("ETH_SEC_IV_OVR");
		if (iv_str)
			outb_dbg_iv_update(&outb_sa->common_sa, iv_str);
	}

	outb_priv->userdata = conf->userdata;
	outb_priv->eth_sec = eth_sec;
	/* Resume sequence number from the ESN value provided in conf */
	outb_priv->esn = ipsec->esn.value;

	memcpy(&outb_priv->nonce, outb_sa->common_sa.iv.gcm.nonce, 4);
	if (outb_sa->common_sa.ctl.enc_type == ROC_IE_SA_ENC_AES_GCM)
		outb_priv->copy_salt = 1;

	rlens = &outb_priv->rlens;
	/* Save rlen info */
	cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

	sess_priv.sa_idx = outb_priv->sa_idx;
	sess_priv.roundup_byte = rlens->roundup_byte;
	sess_priv.roundup_len = rlens->roundup_len;
	sess_priv.partial_len = rlens->partial_len;

	/* Pointer from eth_sec -> outb_sa */
	eth_sec->sa = outb_sa;
	eth_sec->sess = sess;
	eth_sec->sa_idx = sa_idx;
	eth_sec->spi = ipsec->spi;

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	rte_spinlock_unlock(lock);

	plt_nix_dbg("Updated outbound session with spi=%u, sa_idx=%u",
		    eth_sec->spi, eth_sec->sa_idx);

	/*
	 * Update fast path info in priv area.
	 */
	sess->fast_mdata = sess_priv.u64;

	return 0;
exit:
	rte_spinlock_unlock(lock);
	if (rc)
		plt_err("%s", tbuf);
	return rc;
}
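
/* Overview (summary of the code below, not additional behavior): session
 * creation splits on direction. Inbound SAs live in a SPI-indexed table
 * obtained via roc_nix_inl_inb_sa_get(), with sa_idx derived from the SPI
 * masked by the configured SPI range. Outbound SAs get an index from
 * cnxk_eth_outb_sa_idx_get() and are flushed with roc_nix_inl_ctx_write().
 * Both paths keep per-session state in the SW reserved area of the SA.
 */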

static int
cn9k_eth_sec_session_create(void *device,
			    struct rte_security_session_conf *conf,
			    struct rte_security_session *sess)
{
	struct cnxk_eth_sec_sess *eth_sec = SECURITY_GET_SESS_PRIV(sess);
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn9k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct roc_nix *nix = &dev->nix;
	rte_spinlock_t *lock;
	char tbuf[128] = {0};
	bool inbound;
	int ctx_len;
	int rc = 0;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);

	/* Search if a session already exists */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	lock = inbound ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	sess_priv.u64 = 0;

	if (!dev->outb.lf_base) {
		plt_err("Outbound SA base not allocated");
		rte_spinlock_unlock(lock);
		return -ENOMEM;
	}

	if (inbound) {
		struct cn9k_inb_priv_data *inb_priv;
		struct roc_ie_on_inb_sa *inb_sa;
		uint32_t spi_mask;

		PLT_STATIC_ASSERT(sizeof(struct cn9k_inb_priv_data) <
				  ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD);

		spi_mask = roc_nix_inl_inb_spi_range(nix, false, NULL, NULL);

		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE. Assume no inline
		 * device always for CN9K.
		 */
		inb_sa = (struct roc_ie_on_inb_sa *)roc_nix_inl_inb_sa_get(nix, false, ipsec->spi);
		if (!inb_sa) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa");
			rc = -EFAULT;
			goto err;
		}

		/* Check if SA is already in use */
		if (inb_sa->common_sa.ctl.valid) {
			snprintf(tbuf, sizeof(tbuf),
				 "Inbound SA with SPI %u already in use",
				 ipsec->spi);
			rc = -EBUSY;
			goto err;
		}

		memset(inb_sa, 0, sizeof(struct roc_ie_on_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_on_ipsec_inb_sa_create(ipsec, crypto, inb_sa);
		if (rc < 0) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init inbound sa, rc=%d", rc);
			goto err;
		}

		ctx_len = rc;
		inb_priv = roc_nix_inl_on_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;

		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		inb_priv->replay_win_sz = ipsec->replay_win_sz;
		if (inb_priv->replay_win_sz) {
			rc = ar_window_init(inb_priv);
			if (rc)
				goto err;
		}

		/* Prepare session priv */
		sess_priv.inb_sa = 1;
		sess_priv.sa_idx = ipsec->spi & spi_mask;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi & spi_mask;
		eth_sec->spi = ipsec->spi;
		eth_sec->inb = true;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess++;
	} else {
		struct cn9k_outb_priv_data *outb_priv;
		uintptr_t sa_base = dev->outb.sa_base;
		struct cnxk_ipsec_outb_rlens *rlens;
		struct roc_ie_on_outb_sa *outb_sa;
		const char *iv_str;
		uint32_t sa_idx;

		PLT_STATIC_ASSERT(sizeof(struct cn9k_outb_priv_data) <
				  ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, 0);
		if (rc)
			goto err;

		outb_sa = roc_nix_inl_on_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		memset(outb_sa, 0, sizeof(struct roc_ie_on_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_on_ipsec_outb_sa_create(ipsec, crypto, outb_sa);
		if (rc < 0) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outbound sa, rc=%d", rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto err;
		}

		ctx_len = rc;
		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa, outb_sa,
					   inbound, ctx_len);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outbound sa, rc=%d", rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto err;
		}

		/* When IV is provided by the application,
		 * copy the IV to context and enable explicit IV flag in
		 * context.
		 */
		if (ipsec->options.iv_gen_disable == 1) {
			outb_sa->common_sa.ctl.explicit_iv_en = 1;
			iv_str = getenv("ETH_SEC_IV_OVR");
			if (iv_str)
				outb_dbg_iv_update(&outb_sa->common_sa, iv_str);
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;
		/* Start sequence number with 1 */
		outb_priv->seq = 1;

		memcpy(&outb_priv->nonce, outb_sa->common_sa.iv.gcm.nonce, 4);
		if (outb_sa->common_sa.ctl.enc_type == ROC_IE_SA_ENC_AES_GCM)
			outb_priv->copy_salt = 1;

		/* Save rlen info */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess++;
	}

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	rte_spinlock_unlock(lock);

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx);
	/*
	 * Update fast path info in priv area.
	 */
	sess->fast_mdata = sess_priv.u64;

	return 0;
err:
	rte_spinlock_unlock(lock);
	if (rc)
		plt_err("%s", tbuf);
	return rc;
}

static int
cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_sec_sess *eth_sec;
	struct roc_ie_on_outb_sa *outb_sa;
	struct roc_ie_on_inb_sa *inb_sa;
	rte_spinlock_t *lock;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	if (eth_sec->inb) {
		inb_sa = eth_sec->sa;
		/* Disable SA */
		inb_sa->common_sa.ctl.valid = 0;

		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess--;
	} else {
		outb_sa = eth_sec->sa;
		/* Disable SA */
		outb_sa->common_sa.ctl.valid = 0;

		/* Release Outbound SA index */
		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess--;
	}

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	rte_spinlock_unlock(lock);
"inbound" : "outbound", eth_sec->spi, 837 eth_sec->sa_idx); 838 839 return 0; 840 } 841 842 static const struct rte_security_capability * 843 cn9k_eth_sec_capabilities_get(void *device __rte_unused) 844 { 845 return cn9k_eth_sec_capabilities; 846 } 847 848 static uint16_t 849 cn9k_inl_dev_submit(struct roc_nix_inl_dev_q *q, void *inst, uint16_t nb_inst) 850 { 851 /* Not supported */ 852 PLT_SET_USED(q); 853 PLT_SET_USED(inst); 854 PLT_SET_USED(nb_inst); 855 856 return 0; 857 } 858 859 void 860 cn9k_eth_sec_ops_override(void) 861 { 862 static int init_once; 863 864 if (init_once) 865 return; 866 init_once = 1; 867 868 /* Update platform specific ops */ 869 cnxk_eth_sec_ops.session_create = cn9k_eth_sec_session_create; 870 cnxk_eth_sec_ops.session_update = cn9k_eth_sec_session_update; 871 cnxk_eth_sec_ops.session_destroy = cn9k_eth_sec_session_destroy; 872 cnxk_eth_sec_ops.capabilities_get = cn9k_eth_sec_capabilities_get; 873 874 /* Update platform specific rte_pmd_cnxk ops */ 875 cnxk_pmd_ops.inl_dev_submit = cn9k_inl_dev_submit; 876 } 877