/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_udp.h>

#include "cnxk_security.h"

#include "roc_api.h"

static int
ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2, uint8_t *cipher_key,
			      uint8_t *salt_key, uint8_t *hmac_opad_ipad,
			      struct rte_security_ipsec_xform *ipsec_xfrm,
			      struct rte_crypto_sym_xform *crypto_xfrm)
{
	struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
	const uint8_t *key = NULL;
	uint8_t ccm_flag = 0;
	uint32_t *tmp_salt;
	uint64_t *tmp_key;
	int i, length = 0;

	/* Set direction */
	if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		w2->s.dir = ROC_IE_SA_DIR_OUTBOUND;
	else
		w2->s.dir = ROC_IE_SA_DIR_INBOUND;

	if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xfrm = crypto_xfrm;
		cipher_xfrm = crypto_xfrm->next;
	} else {
		cipher_xfrm = crypto_xfrm;
		auth_xfrm = crypto_xfrm->next;
	}

	/* Set protocol - ESP vs AH */
	switch (ipsec_xfrm->proto) {
	case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
		w2->s.protocol = ROC_IE_SA_PROTOCOL_ESP;
		break;
	case RTE_SECURITY_IPSEC_SA_PROTO_AH:
		w2->s.protocol = ROC_IE_SA_PROTOCOL_AH;
		break;
	default:
		return -EINVAL;
	}

	/* Set mode - transport vs tunnel */
	switch (ipsec_xfrm->mode) {
	case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
		w2->s.mode = ROC_IE_SA_MODE_TRANSPORT;
		break;
	case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
		w2->s.mode = ROC_IE_SA_MODE_TUNNEL;
		break;
	default:
		return -EINVAL;
	}

	/* Set encryption algorithm */
	if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		key = crypto_xfrm->aead.key.data;
		length = crypto_xfrm->aead.key.length;

		switch (crypto_xfrm->aead.algo) {
		case RTE_CRYPTO_AEAD_AES_GCM:
			w2->s.enc_type = ROC_IE_SA_ENC_AES_GCM;
			w2->s.auth_type = ROC_IE_SA_AUTH_NULL;
			memcpy(salt_key, &ipsec_xfrm->salt, 4);
			tmp_salt = (uint32_t *)salt_key;
			*tmp_salt = rte_be_to_cpu_32(*tmp_salt);
			break;
		case RTE_CRYPTO_AEAD_AES_CCM:
			w2->s.enc_type = ROC_IE_SA_ENC_AES_CCM;
			w2->s.auth_type = ROC_IE_SA_AUTH_NULL;
			ccm_flag = 0x07 & ~ROC_CPT_AES_CCM_CTR_LEN;
			*salt_key = ccm_flag;
			memcpy(PLT_PTR_ADD(salt_key, 1), &ipsec_xfrm->salt, 3);
			tmp_salt = (uint32_t *)salt_key;
			*tmp_salt = rte_be_to_cpu_32(*tmp_salt);
			break;
		default:
			return -ENOTSUP;
		}
	} else {
		if (cipher_xfrm != NULL) {
			switch (cipher_xfrm->cipher.algo) {
			case RTE_CRYPTO_CIPHER_NULL:
				w2->s.enc_type = ROC_IE_SA_ENC_NULL;
				break;
			case RTE_CRYPTO_CIPHER_AES_CBC:
				w2->s.enc_type = ROC_IE_SA_ENC_AES_CBC;
				break;
			case RTE_CRYPTO_CIPHER_AES_CTR:
				w2->s.enc_type = ROC_IE_SA_ENC_AES_CTR;
				break;
			case RTE_CRYPTO_CIPHER_3DES_CBC:
				w2->s.enc_type = ROC_IE_SA_ENC_3DES_CBC;
				break;
			default:
				return -ENOTSUP;
			}

			key = cipher_xfrm->cipher.key.data;
			length = cipher_xfrm->cipher.key.length;
		}

		switch (auth_xfrm->auth.algo) {
		case RTE_CRYPTO_AUTH_NULL:
			if (w2->s.dir == ROC_IE_SA_DIR_INBOUND && ipsec_xfrm->replay_win_sz) {
				plt_err("anti-replay can't be supported with integrity service disabled");
				return -EINVAL;
			}
			w2->s.auth_type = ROC_IE_SA_AUTH_NULL;
			break;
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
			w2->s.auth_type = ROC_IE_SA_AUTH_SHA1;
			break;
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			w2->s.auth_type = ROC_IE_SA_AUTH_SHA2_256;
			break;
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
			w2->s.auth_type = ROC_IE_SA_AUTH_SHA2_384;
			break;
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			w2->s.auth_type = ROC_IE_SA_AUTH_SHA2_512;
			break;
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
			w2->s.auth_type = ROC_IE_SA_AUTH_AES_XCBC_128;
			break;
		case RTE_CRYPTO_AUTH_AES_GMAC:
			w2->s.auth_type = ROC_IE_SA_AUTH_AES_GMAC;
			key = auth_xfrm->auth.key.data;
			length = auth_xfrm->auth.key.length;
			memcpy(salt_key, &ipsec_xfrm->salt, 4);
			tmp_salt = (uint32_t *)salt_key;
			*tmp_salt = rte_be_to_cpu_32(*tmp_salt);
			break;
		default:
			return -ENOTSUP;
		}

		if (auth_xfrm->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
			const uint8_t *auth_key = auth_xfrm->auth.key.data;

			roc_aes_xcbc_key_derive(auth_key, hmac_opad_ipad);
		} else {
			roc_se_hmac_opad_ipad_gen(w2->s.auth_type, auth_xfrm->auth.key.data,
						  auth_xfrm->auth.key.length, &hmac_opad_ipad[0],
						  ROC_SE_IPSEC);
		}

		tmp_key = (uint64_t *)hmac_opad_ipad;
		for (i = 0; i < (int)(ROC_CTX_MAX_OPAD_IPAD_LEN / sizeof(uint64_t)); i++)
			tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
	}

	/* Set encapsulation type */
	if (ipsec_xfrm->options.udp_encap)
		w2->s.encap_type = ROC_IE_OT_SA_ENCAP_UDP;

	w2->s.spi = ipsec_xfrm->spi;

	if (key != NULL && length != 0) {
		/* Copy encryption key */
		memcpy(cipher_key, key, length);
		tmp_key = (uint64_t *)cipher_key;
		for (i = 0; i < (int)(ROC_CTX_MAX_CKEY_LEN / sizeof(uint64_t)); i++)
			tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
	}

	/* Set AES key length */
	if (w2->s.enc_type == ROC_IE_SA_ENC_AES_CBC || w2->s.enc_type == ROC_IE_SA_ENC_AES_CCM ||
	    w2->s.enc_type == ROC_IE_SA_ENC_AES_CTR || w2->s.enc_type == ROC_IE_SA_ENC_AES_GCM ||
	    w2->s.auth_type == ROC_IE_SA_AUTH_AES_GMAC) {
		switch (length) {
		case ROC_CPT_AES128_KEY_LEN:
			w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
			break;
		case ROC_CPT_AES192_KEY_LEN:
			w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
			break;
		case ROC_CPT_AES256_KEY_LEN:
			w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
			break;
		default:
			plt_err("Invalid AES key length");
			return -EINVAL;
		}
	}

	if (ipsec_xfrm->life.packets_soft_limit != 0 ||
	    ipsec_xfrm->life.packets_hard_limit != 0) {
		if (ipsec_xfrm->life.bytes_soft_limit != 0 ||
		    ipsec_xfrm->life.bytes_hard_limit != 0) {
			plt_err("Expiry tracking with both packets & bytes is not supported");
			return -EINVAL;
		}
		w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_PKTS;
	}

	if (ipsec_xfrm->life.bytes_soft_limit != 0 ||
	    ipsec_xfrm->life.bytes_hard_limit != 0) {
		if (ipsec_xfrm->life.packets_soft_limit != 0 ||
		    ipsec_xfrm->life.packets_hard_limit != 0) {
			plt_err("Expiry tracking with both packets & bytes is not supported");
			return -EINVAL;
		}
		w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_OCTETS;
	}

	return 0;
}

static size_t
ot_ipsec_inb_ctx_size(struct roc_ot_ipsec_inb_sa *sa)
{
	size_t size;

	/* Variable based on Anti-replay Window */
	size = offsetof(struct roc_ot_ipsec_inb_sa, ctx) +
	       offsetof(struct roc_ot_ipsec_inb_ctx_update_reg, ar_winbits);

	if (sa->w0.s.ar_win)
		size += (1 << (sa->w0.s.ar_win - 1)) * sizeof(uint64_t);

	return size;
}
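
/* Worked example (illustrative only, not used at runtime): the anti-replay
 * window is stored in an encoded form, ar_win = log2(replay_win_sz) - 5, and
 * the inbound context then grows by (1 << (ar_win - 1)) 64-bit words of
 * window bitmap. For a requested replay_win_sz of 1024:
 *
 *   ar_win  = rte_log2_u32(1024) - 5 = 5
 *   winbits = (1 << (5 - 1)) * sizeof(uint64_t) = 16 * 8 = 128 bytes
 *           = 1024 bits, i.e. one bit per packet in the window.
 *
 * ot_ipsec_inb_ctx_size() above accounts for exactly this variable tail, and
 * cnxk_ot_ipsec_inb_sa_fill() below performs the encoding.
 */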

static void
ot_ipsec_update_ipv6_addr_endianness(uint64_t *addr)
{
	*addr = rte_be_to_cpu_64(*addr);
	addr++;
	*addr = rte_be_to_cpu_64(*addr);
}

static int
ot_ipsec_inb_tunnel_hdr_fill(struct roc_ot_ipsec_inb_sa *sa,
			     struct rte_security_ipsec_xform *ipsec_xfrm)
{
	struct rte_security_ipsec_tunnel_param *tunnel;

	if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
		return 0;

	if (ipsec_xfrm->options.tunnel_hdr_verify == 0)
		return 0;

	tunnel = &ipsec_xfrm->tunnel;

	switch (tunnel->type) {
	case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
		sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
		memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip,
		       sizeof(struct in_addr));
		memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip,
		       sizeof(struct in_addr));

		/* IP Source and Dest are in LE/CPU endian */
		sa->outer_hdr.ipv4.src_addr =
			rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
		sa->outer_hdr.ipv4.dst_addr =
			rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);

		break;
	case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
		sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
		memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
		       sizeof(sa->outer_hdr.ipv6.src_addr));
		memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
		       sizeof(sa->outer_hdr.ipv6.dst_addr));

		/* IP Source and Dest are in LE/CPU endian */
		ot_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.src_addr);
		ot_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.dst_addr);

		break;
	default:
		return -EINVAL;
	}

	switch (ipsec_xfrm->options.tunnel_hdr_verify) {
	case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR:
		sa->w2.s.ip_hdr_verify = ROC_IE_OT_SA_IP_HDR_VERIFY_DST_ADDR;
		break;
	case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR:
		sa->w2.s.ip_hdr_verify = ROC_IE_OT_SA_IP_HDR_VERIFY_SRC_DST_ADDR;
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}

int
cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
			  struct rte_security_ipsec_xform *ipsec_xfrm,
			  struct rte_crypto_sym_xform *crypto_xfrm,
			  bool is_inline)
{
	uint16_t sport = 4500, dport = 4500;
	union roc_ot_ipsec_sa_word2 w2;
	uint32_t replay_win_sz;
	size_t offset;
	int rc;

	/* Initialize the SA */
	roc_ot_ipsec_inb_sa_init(sa, is_inline);

	w2.u64 = 0;
	rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->w8.s.salt,
					   sa->hmac_opad_ipad, ipsec_xfrm,
					   crypto_xfrm);
	if (rc)
		return rc;

	/* Update common word2 data */
	sa->w2.u64 = w2.u64;

	/* Only power-of-two replay window sizes are supported */
	replay_win_sz = ipsec_xfrm->replay_win_sz;
	if (replay_win_sz) {
		if (!rte_is_power_of_2(replay_win_sz) ||
		    replay_win_sz > ROC_AR_WIN_SIZE_MAX)
			return -ENOTSUP;

		sa->w0.s.ar_win = rte_log2_u32(replay_win_sz) - 5;
	}

	rc = ot_ipsec_inb_tunnel_hdr_fill(sa, ipsec_xfrm);
	if (rc)
		return rc;

	/* Default options for pkt_out and pkt_fmt are with
	 * second pass meta and no defrag.
	 */
	sa->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_META;
	sa->w0.s.pkt_output = ROC_IE_OT_SA_PKT_OUTPUT_NO_FRAG;
	sa->w0.s.pkind = ROC_IE_OT_CPT_PKIND;

	if (ipsec_xfrm->options.ip_reassembly_en)
		sa->w0.s.pkt_output = ROC_IE_OT_SA_PKT_OUTPUT_HW_BASED_DEFRAG;

	/* ESN */
	sa->w2.s.esn_en = !!ipsec_xfrm->options.esn;
	if (ipsec_xfrm->options.udp_encap) {
		if (ipsec_xfrm->udp.sport)
			sport = ipsec_xfrm->udp.sport;

		if (ipsec_xfrm->udp.dport)
			dport = ipsec_xfrm->udp.dport;

		sa->w10.s.udp_src_port = sport;
		sa->w10.s.udp_dst_port = dport;
	}

	if (ipsec_xfrm->options.udp_ports_verify)
		sa->w2.s.udp_ports_verify = 1;

	offset = offsetof(struct roc_ot_ipsec_inb_sa, ctx);
	/* Word offset for HW managed SA field */
	sa->w0.s.hw_ctx_off = offset / 8;
	/* Context push size for inbound spans up to hw_ctx including
	 * ar_base field, in 8B units
	 */
	sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
	/* Entire context size in 128B units */
	sa->w0.s.ctx_size =
		(PLT_ALIGN_CEIL(ot_ipsec_inb_ctx_size(sa), ROC_CTX_UNIT_128B) /
		 ROC_CTX_UNIT_128B) -
		1;

	/* CPT MC triggers expiry when counter value changes from 2 to 1. To
	 * mitigate this behaviour add 1 to the life counter values provided.
	 */

	if (ipsec_xfrm->life.bytes_soft_limit) {
		sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
		sa->w0.s.soft_life_dec = 1;
	}

	if (ipsec_xfrm->life.packets_soft_limit) {
		sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
		sa->w0.s.soft_life_dec = 1;
	}

	if (ipsec_xfrm->life.bytes_hard_limit) {
		sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
		sa->w0.s.hard_life_dec = 1;
	}

	if (ipsec_xfrm->life.packets_hard_limit) {
		sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
		sa->w0.s.hard_life_dec = 1;
	}

	rte_wmb();

	/* Enable SA */
	sa->w2.s.valid = 1;
	return 0;
}
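
/* Usage sketch (illustrative only, not part of this driver): a minimal caller
 * might fill an inbound AES-GCM SA as below. 'key_data' and 'salt_be32' are
 * placeholders, and 'sa' is assumed to point to SA memory sized and aligned
 * as required by the ROC layer.
 *
 *	struct rte_crypto_sym_xform aead_xfrm = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key_data, .length = 16 },
 *		},
 *	};
 *	struct rte_security_ipsec_xform ipsec_xfrm = {
 *		.spi = 0x100,
 *		.salt = salt_be32,
 *		.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
 *		.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *		.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *		.replay_win_sz = 1024,
 *	};
 *	int rc;
 *
 *	rc = cnxk_ot_ipsec_inb_sa_fill(sa, &ipsec_xfrm, &aead_xfrm, true);
 *
 * On success (rc == 0) the SA is marked valid and is ready to be programmed
 * to hardware by the calling PMD.
 */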

int
cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
			   struct rte_security_ipsec_xform *ipsec_xfrm,
			   struct rte_crypto_sym_xform *crypto_xfrm)
{
	struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
	uint16_t sport = 4500, dport = 4500;
	union roc_ot_ipsec_sa_word2 w2;
	size_t offset;
	int rc;

	/* Initialize the SA */
	roc_ot_ipsec_outb_sa_init(sa);

	w2.u64 = 0;
	rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->iv.s.salt,
					   sa->hmac_opad_ipad, ipsec_xfrm,
					   crypto_xfrm);
	if (rc)
		return rc;

	/* Update common word2 data */
	sa->w2.u64 = w2.u64;

	if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
		goto skip_tunnel_info;

	/* Tunnel header info */
	switch (tunnel->type) {
	case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
		sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
		memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip,
		       sizeof(struct in_addr));
		memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip,
		       sizeof(struct in_addr));

		/* IP Source and Dest are in LE/CPU endian */
		sa->outer_hdr.ipv4.src_addr =
			rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
		sa->outer_hdr.ipv4.dst_addr =
			rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);

		/* Outer header DF bit source */
		if (!ipsec_xfrm->options.copy_df) {
			sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
				ROC_IE_OT_SA_COPY_FROM_SA;
			sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv4.df;
		} else {
			sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
				ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
		}

		/* Outer header DSCP source */
		if (!ipsec_xfrm->options.copy_dscp) {
			sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
			sa->w10.s.dscp = tunnel->ipv4.dscp;
		} else {
			sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
		}
		break;
	case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
		sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
		memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
		       sizeof(sa->outer_hdr.ipv6.src_addr));
		memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
		       sizeof(sa->outer_hdr.ipv6.dst_addr));

		/* IP Source and Dest are in LE/CPU endian */
		ot_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.src_addr);
		ot_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.dst_addr);

		/* Outer header flow label source */
		if (!ipsec_xfrm->options.copy_flabel) {
			sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
				ROC_IE_OT_SA_COPY_FROM_SA;

			sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv6.flabel;
		} else {
			sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
				ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
		}

		/* Outer header DSCP source */
		if (!ipsec_xfrm->options.copy_dscp) {
			sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
			sa->w10.s.dscp = tunnel->ipv6.dscp;
		} else {
			sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
		}
		break;
	default:
		return -EINVAL;
	}

skip_tunnel_info:
	/* ESN */
	sa->w0.s.esn_en = !!ipsec_xfrm->options.esn;

	if (ipsec_xfrm->esn.value)
		sa->ctx.esn_val = ipsec_xfrm->esn.value - 1;

	if (ipsec_xfrm->options.udp_encap) {
		if (ipsec_xfrm->udp.sport)
			sport = ipsec_xfrm->udp.sport;

		if (ipsec_xfrm->udp.dport)
			dport = ipsec_xfrm->udp.dport;

		sa->w10.s.udp_src_port = sport;
		sa->w10.s.udp_dst_port = dport;
	}

	offset = offsetof(struct roc_ot_ipsec_outb_sa, ctx);
	/* Word offset for HW managed SA field */
	sa->w0.s.hw_ctx_off = offset / 8;

	/* Context push size is up to err ctl in HW ctx */
	sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;

	/* Entire context size in 128B units */
	offset = sizeof(struct roc_ot_ipsec_outb_sa);
	sa->w0.s.ctx_size = (PLT_ALIGN_CEIL(offset, ROC_CTX_UNIT_128B) /
			     ROC_CTX_UNIT_128B) -
			    1;

	/* IPID gen */
	sa->w2.s.ipid_gen = 1;

	/* CPT MC triggers expiry when counter value changes from 2 to 1. To
	 * mitigate this behaviour add 1 to the life counter values provided.
	 */

	if (ipsec_xfrm->life.bytes_soft_limit) {
		sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
		sa->w0.s.soft_life_dec = 1;
	}

	if (ipsec_xfrm->life.packets_soft_limit) {
		sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
		sa->w0.s.soft_life_dec = 1;
	}

	if (ipsec_xfrm->life.bytes_hard_limit) {
		sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
		sa->w0.s.hard_life_dec = 1;
	}

	if (ipsec_xfrm->life.packets_hard_limit) {
		sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
		sa->w0.s.hard_life_dec = 1;
	}

	/* There are two words of CPT_CTX_HW_S for ucode to skip */
	sa->w0.s.ctx_hdr_size = 1;
	sa->w0.s.aop_valid = 1;

	rte_wmb();

	/* Enable SA */
	sa->w2.s.valid = 1;
	return 0;
}

bool
cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa)
{
	return !!sa->w2.s.valid;
}

bool
cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa)
{
	return !!sa->w2.s.valid;
}

uint8_t
cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
		     enum rte_crypto_auth_algorithm a_algo,
		     enum rte_crypto_aead_algorithm aead_algo)
{
	uint8_t ivlen = 0;

	if ((aead_algo == RTE_CRYPTO_AEAD_AES_GCM) || (aead_algo == RTE_CRYPTO_AEAD_AES_CCM))
		ivlen = 8;

	switch (c_algo) {
	case RTE_CRYPTO_CIPHER_AES_CTR:
		ivlen = 8;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		ivlen = ROC_CPT_DES_BLOCK_LENGTH;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		ivlen = ROC_CPT_AES_BLOCK_LENGTH;
		break;
	default:
		break;
	}

	switch (a_algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		ivlen = 8;
		break;
	default:
		break;
	}

	return ivlen;
}

uint8_t
cnxk_ipsec_icvlen_get(enum rte_crypto_cipher_algorithm c_algo,
		      enum rte_crypto_auth_algorithm a_algo,
		      enum rte_crypto_aead_algorithm aead_algo)
{
	uint8_t icv = 0;

	(void)c_algo;

	switch (a_algo) {
	case RTE_CRYPTO_AUTH_NULL:
		icv = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		icv = 12;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_AES_GMAC:
		icv = 16;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		icv = 24;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		icv = 32;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		icv = 12;
		break;
	default:
		break;
	}

	switch (aead_algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
	case RTE_CRYPTO_AEAD_AES_CCM:
		icv = 16;
		break;
	default:
		break;
	}

	return icv;
}

uint8_t
cnxk_ipsec_outb_roundup_byte(enum rte_crypto_cipher_algorithm c_algo,
			     enum rte_crypto_aead_algorithm aead_algo)
{
	uint8_t roundup_byte = 4;

	if ((aead_algo == RTE_CRYPTO_AEAD_AES_GCM) || (aead_algo == RTE_CRYPTO_AEAD_AES_CCM))
		return roundup_byte;

	switch (c_algo) {
	case RTE_CRYPTO_CIPHER_AES_CTR:
		roundup_byte = 4;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		roundup_byte = 16;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		roundup_byte = 8;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		roundup_byte = 4;
		break;
	default:
		break;
	}

	return roundup_byte;
}
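
/* Note (illustrative): cnxk_ipsec_outb_rlens_get() below budgets the
 * worst-case growth of an outbound packet as
 *
 *   max_extended_len = partial_len + roundup_len + roundup_byte
 *
 * where partial_len covers the fixed overheads (ESP/AH header, optional outer
 * tunnel IP and UDP-encap headers, IV and ICV), roundup_len is the fixed ESP
 * trailer and roundup_byte is the cipher block size the payload is padded up
 * to. Callers would typically use max_extended_len to reserve enough mbuf
 * room before submitting packets for outbound IPsec processing.
 */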

int
cnxk_ipsec_outb_rlens_get(struct cnxk_ipsec_outb_rlens *rlens,
			  struct rte_security_ipsec_xform *ipsec_xfrm,
			  struct rte_crypto_sym_xform *crypto_xfrm)
{
	struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
	enum rte_crypto_cipher_algorithm c_algo = RTE_CRYPTO_CIPHER_NULL;
	enum rte_crypto_auth_algorithm a_algo = RTE_CRYPTO_AUTH_NULL;
	enum rte_crypto_aead_algorithm aead_algo = 0;
	uint16_t partial_len = 0;
	uint8_t roundup_byte = 0;
	int8_t roundup_len = 0;

	memset(rlens, 0, sizeof(struct cnxk_ipsec_outb_rlens));

	/* Get Cipher and Auth algo */
	if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_algo = crypto_xfrm->aead.algo;
	} else {
		if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			c_algo = crypto_xfrm->cipher.algo;
		else
			a_algo = crypto_xfrm->auth.algo;

		if (crypto_xfrm->next) {
			if (crypto_xfrm->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
				c_algo = crypto_xfrm->next->cipher.algo;
			else
				a_algo = crypto_xfrm->next->auth.algo;
		}
	}

	if (ipsec_xfrm->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
		partial_len = ROC_CPT_ESP_HDR_LEN;
		roundup_len = ROC_CPT_ESP_TRL_LEN;
	} else {
		partial_len = ROC_CPT_AH_HDR_LEN;
	}

	if (ipsec_xfrm->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (tunnel->type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			partial_len += ROC_CPT_TUNNEL_IPV4_HDR_LEN;
		else
			partial_len += ROC_CPT_TUNNEL_IPV6_HDR_LEN;
	}

	partial_len += cnxk_ipsec_ivlen_get(c_algo, a_algo, aead_algo);
	partial_len += cnxk_ipsec_icvlen_get(c_algo, a_algo, aead_algo);
	roundup_byte = cnxk_ipsec_outb_roundup_byte(c_algo, aead_algo);

	if (ipsec_xfrm->options.udp_encap)
		partial_len += sizeof(struct rte_udp_hdr);

	rlens->partial_len = partial_len;
	rlens->roundup_len = roundup_len;
	rlens->roundup_byte = roundup_byte;
	rlens->max_extended_len = partial_len + roundup_len + roundup_byte;
	return 0;
}

static inline int
on_ipsec_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
		    struct rte_crypto_sym_xform *crypto_xform,
		    struct roc_ie_on_sa_ctl *ctl)
{
	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
	int aes_key_len = 0;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = crypto_xform;
		cipher_xform = crypto_xform->next;
	} else {
		cipher_xform = crypto_xform;
		auth_xform = crypto_xform->next;
	}

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
	else
		ctl->direction = ROC_IE_SA_DIR_INBOUND;

	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
		else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
			ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
		else
			return -EINVAL;
	}

	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
		ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
		ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
	} else if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
		ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;
	else
		return -EINVAL;

	if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_AH;
	else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
	else
		return -EINVAL;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		switch (crypto_xform->aead.algo) {
		case RTE_CRYPTO_AEAD_AES_GCM:
			ctl->enc_type = ROC_IE_SA_ENC_AES_GCM;
			aes_key_len = crypto_xform->aead.key.length;
			break;
		case RTE_CRYPTO_AEAD_AES_CCM:
			ctl->enc_type = ROC_IE_SA_ENC_AES_CCM;
			aes_key_len = crypto_xform->aead.key.length;
			break;
		default:
			plt_err("Unsupported AEAD algorithm");
			return -ENOTSUP;
		}
	} else {
		if (cipher_xform != NULL) {
			switch (cipher_xform->cipher.algo) {
			case RTE_CRYPTO_CIPHER_NULL:
				ctl->enc_type = ROC_IE_SA_ENC_NULL;
				break;
			case RTE_CRYPTO_CIPHER_DES_CBC:
				ctl->enc_type = ROC_IE_SA_ENC_DES_CBC;
				break;
			case RTE_CRYPTO_CIPHER_3DES_CBC:
				ctl->enc_type = ROC_IE_SA_ENC_3DES_CBC;
				break;
			case RTE_CRYPTO_CIPHER_AES_CBC:
				ctl->enc_type = ROC_IE_SA_ENC_AES_CBC;
				aes_key_len = cipher_xform->cipher.key.length;
				break;
			case RTE_CRYPTO_CIPHER_AES_CTR:
				ctl->enc_type = ROC_IE_SA_ENC_AES_CTR;
				aes_key_len = cipher_xform->cipher.key.length;
				break;
			default:
				plt_err("Unsupported cipher algorithm");
				return -ENOTSUP;
			}
		}

		switch (auth_xform->auth.algo) {
		case RTE_CRYPTO_AUTH_NULL:
			ctl->auth_type = ROC_IE_SA_AUTH_NULL;
			break;
		case RTE_CRYPTO_AUTH_MD5_HMAC:
			ctl->auth_type = ROC_IE_SA_AUTH_MD5;
			break;
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
			ctl->auth_type = ROC_IE_SA_AUTH_SHA1;
			break;
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
			ctl->auth_type = ROC_IE_SA_AUTH_SHA2_224;
			break;
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			ctl->auth_type = ROC_IE_SA_AUTH_SHA2_256;
			break;
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
			ctl->auth_type = ROC_IE_SA_AUTH_SHA2_384;
			break;
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			ctl->auth_type = ROC_IE_SA_AUTH_SHA2_512;
			break;
		case RTE_CRYPTO_AUTH_AES_GMAC:
			ctl->auth_type = ROC_IE_SA_AUTH_AES_GMAC;
			aes_key_len = auth_xform->auth.key.length;
			break;
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
			ctl->auth_type = ROC_IE_SA_AUTH_AES_XCBC_128;
			break;
		default:
			plt_err("Unsupported auth algorithm");
			return -ENOTSUP;
		}
	}

	/* Set AES key length */
	if (ctl->enc_type == ROC_IE_SA_ENC_AES_CBC || ctl->enc_type == ROC_IE_SA_ENC_AES_CCM ||
	    ctl->enc_type == ROC_IE_SA_ENC_AES_CTR || ctl->enc_type == ROC_IE_SA_ENC_AES_GCM ||
	    ctl->auth_type == ROC_IE_SA_AUTH_AES_GMAC) {
		switch (aes_key_len) {
		case 16:
			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
			break;
		case 24:
			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
			break;
		case 32:
			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
			break;
		default:
			plt_err("Invalid AES key length");
			return -EINVAL;
		}
	}

	if (ipsec->options.esn)
		ctl->esn_en = 1;

	if (ipsec->options.udp_encap == 1)
		ctl->encap_type = ROC_IE_ON_SA_ENCAP_UDP;

	ctl->copy_df = ipsec->options.copy_df;

	ctl->spi = rte_cpu_to_be_32(ipsec->spi);

	rte_io_wmb();

	ctl->valid = 1;

	return 0;
}
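
/* Note: the ctl words are populated first and the valid bit is raised only
 * after rte_io_wmb() above, mirroring the rte_wmb()/valid ordering used by
 * the OT SA fill helpers earlier in this file.
 */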

static inline int
on_fill_ipsec_common_sa(struct rte_security_ipsec_xform *ipsec,
			struct rte_crypto_sym_xform *crypto_xform,
			struct roc_ie_on_common_sa *common_sa)
{
	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
	const uint8_t *cipher_key;
	int cipher_key_len = 0;
	uint8_t ccm_flag = 0;
	int ret;

	ret = on_ipsec_sa_ctl_set(ipsec, crypto_xform, &common_sa->ctl);
	if (ret)
		return ret;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = crypto_xform;
		cipher_xform = crypto_xform->next;
	} else {
		cipher_xform = crypto_xform;
		auth_xform = crypto_xform->next;
	}

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			memcpy(common_sa->iv.gcm.nonce, &ipsec->salt, 4);
		} else if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
			ccm_flag = 0x07 & ~ROC_CPT_AES_CCM_CTR_LEN;
			*common_sa->iv.gcm.nonce = ccm_flag;
			memcpy(PLT_PTR_ADD(common_sa->iv.gcm.nonce, 1), &ipsec->salt, 3);
		}
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;
	} else {
		if (cipher_xform) {
			cipher_key = cipher_xform->cipher.key.data;
			cipher_key_len = cipher_xform->cipher.key.length;
		}

		if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
			memcpy(common_sa->iv.gcm.nonce, &ipsec->salt, 4);
			cipher_key = auth_xform->auth.key.data;
			cipher_key_len = auth_xform->auth.key.length;
		}
	}

	if (cipher_key_len != 0)
		memcpy(common_sa->cipher_key, cipher_key, cipher_key_len);

	return 0;
}

int
cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
			     struct rte_crypto_sym_xform *crypto_xform,
			     struct roc_ie_on_outb_sa *out_sa)
{
	struct roc_ie_on_ip_template *template = NULL;
	struct rte_crypto_sym_xform *auth_xform;
	struct roc_ie_on_sa_ctl *ctl;
	struct rte_ipv6_hdr *ip6;
	struct rte_ipv4_hdr *ip4;
	uint16_t sport, dport;
	size_t ctx_len;
	int ret;

	ctl = &out_sa->common_sa.ctl;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		auth_xform = crypto_xform;
	else
		auth_xform = crypto_xform->next;

	ret = on_fill_ipsec_common_sa(ipsec, crypto_xform, &out_sa->common_sa);
	if (ret)
		return ret;

	if (ctl->enc_type == ROC_IE_SA_ENC_AES_GCM || ctl->enc_type == ROC_IE_SA_ENC_AES_CCM ||
	    ctl->auth_type == ROC_IE_SA_AUTH_NULL || ctl->auth_type == ROC_IE_SA_AUTH_AES_GMAC) {
		template = &out_sa->aes_gcm.template;
		ctx_len = offsetof(struct roc_ie_on_outb_sa, aes_gcm.template);
	} else {
		switch (ctl->auth_type) {
		case ROC_IE_SA_AUTH_MD5:
		case ROC_IE_SA_AUTH_SHA1:
			template = &out_sa->sha1.template;
			ctx_len = offsetof(struct roc_ie_on_outb_sa, sha1.template);
			break;
		case ROC_IE_SA_AUTH_SHA2_256:
		case ROC_IE_SA_AUTH_SHA2_384:
		case ROC_IE_SA_AUTH_SHA2_512:
			template = &out_sa->sha2.template;
			ctx_len = offsetof(struct roc_ie_on_outb_sa, sha2.template);
			break;
		case ROC_IE_SA_AUTH_AES_XCBC_128:
			template = &out_sa->aes_xcbc.template;
			ctx_len = offsetof(struct roc_ie_on_outb_sa, aes_xcbc.template);
			break;
		default:
			plt_err("Unsupported auth algorithm");
			return -EINVAL;
		}
	}

	ip4 = (struct rte_ipv4_hdr *)&template->ip4.ipv4_hdr;

	sport = 4500;
	dport = 4500;

	/* If custom port values are provided, overwrite the default port
	 * values.
	 */
	if (ipsec->options.udp_encap) {
		if (ipsec->udp.sport)
			sport = ipsec->udp.sport;

		if (ipsec->udp.dport)
			dport = ipsec->udp.dport;

		ip4->next_proto_id = IPPROTO_UDP;
		template->ip4.udp_src = rte_be_to_cpu_16(sport);
		template->ip4.udp_dst = rte_be_to_cpu_16(dport);
	} else {
		if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
			ip4->next_proto_id = IPPROTO_AH;
		else
			ip4->next_proto_id = IPPROTO_ESP;
	}

	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			uint16_t frag_off = 0;

			ctx_len += sizeof(template->ip4);

			ip4->version_ihl = RTE_IPV4_VHL_DEF;
			ip4->time_to_live = ipsec->tunnel.ipv4.ttl ?
						    ipsec->tunnel.ipv4.ttl : 0x40;
			ip4->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
			if (ipsec->tunnel.ipv4.df)
				frag_off |= RTE_IPV4_HDR_DF_FLAG;
			ip4->fragment_offset = rte_cpu_to_be_16(frag_off);

			memcpy(&ip4->src_addr, &ipsec->tunnel.ipv4.src_ip,
			       sizeof(struct in_addr));
			memcpy(&ip4->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
			       sizeof(struct in_addr));
		} else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			ctx_len += sizeof(template->ip6);

			ip6 = (struct rte_ipv6_hdr *)&template->ip6.ipv6_hdr;
			if (ipsec->options.udp_encap) {
				ip6->proto = IPPROTO_UDP;
				template->ip6.udp_src = rte_be_to_cpu_16(sport);
				template->ip6.udp_dst = rte_be_to_cpu_16(dport);
			} else {
				ip6->proto = (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
						     IPPROTO_ESP : IPPROTO_AH;
			}
			ip6->vtc_flow =
				rte_cpu_to_be_32(0x60000000 |
						 ((ipsec->tunnel.ipv6.dscp
						   << RTE_IPV6_HDR_TC_SHIFT) &
						  RTE_IPV6_HDR_TC_MASK) |
						 ((ipsec->tunnel.ipv6.flabel
						   << RTE_IPV6_HDR_FL_SHIFT) &
						  RTE_IPV6_HDR_FL_MASK));
			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit ?
						  ipsec->tunnel.ipv6.hlimit : 0x40;
			ip6->src_addr = ipsec->tunnel.ipv6.src_addr;
			ip6->dst_addr = ipsec->tunnel.ipv6.dst_addr;
		}
	} else
		ctx_len += sizeof(template->ip4);

	ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);

	if (crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
		uint8_t *hmac_opad_ipad = (uint8_t *)&out_sa->sha2;

		if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
			const uint8_t *auth_key = auth_xform->auth.key.data;

			roc_aes_xcbc_key_derive(auth_key, hmac_opad_ipad);
		} else if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_NULL) {
			roc_se_hmac_opad_ipad_gen(
				out_sa->common_sa.ctl.auth_type, auth_xform->auth.key.data,
				auth_xform->auth.key.length, &hmac_opad_ipad[0], ROC_SE_IPSEC);
		}
	}

	return ctx_len;
}

int
cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
			    struct rte_crypto_sym_xform *crypto_xform,
			    struct roc_ie_on_inb_sa *in_sa)
{
	struct rte_crypto_sym_xform *auth_xform = crypto_xform;
	const uint8_t *auth_key;
	int auth_key_len = 0;
	size_t ctx_len = 0;
	int ret;

	ret = on_fill_ipsec_common_sa(ipsec, crypto_xform, &in_sa->common_sa);
	if (ret)
		return ret;

	if (crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD &&
	    crypto_xform->auth.algo == RTE_CRYPTO_AUTH_NULL && ipsec->replay_win_sz) {
		plt_err("anti-replay can't be supported with integrity service disabled");
		return -EINVAL;
	}

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD ||
	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_NULL ||
	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		ctx_len = offsetof(struct roc_ie_on_inb_sa, sha1_or_gcm.hmac_key[0]);
	} else {
		uint8_t *hmac_opad_ipad = (uint8_t *)&in_sa->sha2;

		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;

		switch (auth_xform->auth.algo) {
		case RTE_CRYPTO_AUTH_NULL:
			break;
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
			memcpy(in_sa->sha1_or_gcm.hmac_key, auth_key, auth_key_len);
			ctx_len = offsetof(struct roc_ie_on_inb_sa, sha1_or_gcm.selector);
			break;
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			memcpy(in_sa->sha2.hmac_key, auth_key, auth_key_len);
			ctx_len = offsetof(struct roc_ie_on_inb_sa, sha2.selector);
			break;
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
			memcpy(in_sa->aes_xcbc.key, auth_key, auth_key_len);
			ctx_len = offsetof(struct roc_ie_on_inb_sa, aes_xcbc.selector);
			break;
		default:
			plt_err("Unsupported auth algorithm %u", auth_xform->auth.algo);
			return -ENOTSUP;
		}

		if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
			const uint8_t *auth_key = auth_xform->auth.key.data;

			roc_aes_xcbc_key_derive(auth_key, hmac_opad_ipad);
		} else if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_NULL) {
			roc_se_hmac_opad_ipad_gen(
				in_sa->common_sa.ctl.auth_type, auth_xform->auth.key.data,
				auth_xform->auth.key.length, &hmac_opad_ipad[0], ROC_SE_IPSEC);
		}
	}

	return ctx_len;
}
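
/* Usage sketch (illustrative only, not part of this file): the ON SA create
 * helpers above return the populated context length in bytes (or a negative
 * errno), which the calling PMD is expected to use when programming the SA,
 * e.g.:
 *
 *	int ctx_len;
 *
 *	ctx_len = cnxk_on_ipsec_inb_sa_create(&ipsec_xfrm, &crypto_xfrm, in_sa);
 *	if (ctx_len < 0)
 *		return ctx_len;
 *
 *	// The first 'ctx_len' bytes of 'in_sa' now hold the SA; how they are
 *	// flushed/programmed to CPT is PMD-specific and outside this file.
 *
 * 'ipsec_xfrm', 'crypto_xfrm' and 'in_sa' are placeholders for objects the
 * caller already owns.
 */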