/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"

/*
 * Fill the rte_security IPsec transform from the SA: tunnel endpoints for
 * tunnel mode, plus replay window, ESN and UDP encapsulation options.
 */
static inline void
set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
{
	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		struct rte_security_ipsec_tunnel_param *tunnel =
				&ipsec->tunnel;
		if (IS_IP4_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;
			tunnel->ipv4.ttl = IPDEFTTL;

			memcpy((uint8_t *)&tunnel->ipv4.src_ip,
				(uint8_t *)&sa->src.ip.ip4, 4);

			memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
				(uint8_t *)&sa->dst.ip.ip4, 4);
		} else if (IS_IP6_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			tunnel->ipv6.hlimit = IPDEFTTL;
			tunnel->ipv6.dscp = 0;
			tunnel->ipv6.flabel = 0;

			memcpy((uint8_t *)&tunnel->ipv6.src_addr,
				(uint8_t *)&sa->src.ip.ip6.ip6_b, 16);

			memcpy((uint8_t *)&tunnel->ipv6.dst_addr,
				(uint8_t *)&sa->dst.ip.ip6.ip6_b, 16);
		}
		/* TODO support for Transport */
	}
	ipsec->replay_win_sz = app_sa_prm.window_size;
	ipsec->options.esn = app_sa_prm.enable_esn;
	ipsec->options.udp_encap = sa->udp_encap;
}

/*
 * Create a lookaside (cryptodev or rte_security) session for the SA and
 * record, for every lcore that can handle it, which cryptodev queue pair
 * to use.
 */
int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
	struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
	struct rte_ipsec_session *ips)
{
	uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS;
	struct rte_cryptodev_info cdev_info;
	unsigned long cdev_id_qp = 0;
	struct cdev_key key = { 0 };
	struct ipsec_ctx *ipsec_ctx;
	uint32_t lcore_id;
	int32_t ret = 0;

	RTE_LCORE_FOREACH(lcore_id) {
		ipsec_ctx = ipsec_ctx_lcore[lcore_id];

		/* Core is not bound to any cryptodev, skip it */
		if (ipsec_ctx->cdev_map == NULL)
			continue;

		/* Look for a cryptodev that can handle this SA */
		key.lcore_id = (uint8_t)lcore_id;
		key.cipher_algo = (uint8_t)sa->cipher_algo;
		key.auth_algo = (uint8_t)sa->auth_algo;
		key.aead_algo = (uint8_t)sa->aead_algo;

		ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
				(void **)&cdev_id_qp);
		if (ret == -ENOENT)
			continue;
		if (ret < 0) {
			RTE_LOG(ERR, IPSEC,
				"No cryptodev: core %u, cipher_algo %u, "
				"auth_algo %u, aead_algo %u\n",
				key.lcore_id,
				key.cipher_algo,
				key.auth_algo,
				key.aead_algo);
			return ret;
		}

		/* Verify that all cores are using the same cryptodev for the
		 * current algorithm combination required by the SA.
		 * The current cryptodev mapping process maps the SA to the
		 * first cryptodev that matches the requirements, so this is a
		 * double check, not an additional restriction.
		 */
		if (cdev_id == RTE_CRYPTO_MAX_DEVS)
			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
		else if (cdev_id != ipsec_ctx->tbl[cdev_id_qp].id) {
			RTE_LOG(ERR, IPSEC,
				"SA mapping to multiple cryptodevs is "
				"not supported!\n");
			return -EINVAL;
		}

		/* Store per core queue pair information */
		sa->cqp[lcore_id] = &ipsec_ctx->tbl[cdev_id_qp];
	}
	if (cdev_id == RTE_CRYPTO_MAX_DEVS) {
		RTE_LOG(WARNING, IPSEC, "No cores found to handle SA\n");
		return 0;
	}

	RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
			"%u\n", sa->spi, cdev_id);

	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
			ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		struct rte_security_session_conf sess_conf = {
			.action_type = ips->type,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.spi = sa->spi,
				.salt = sa->salt,
				.options = { 0 },
				.replay_win_sz = 0,
				.direction = sa->direction,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = (IS_TUNNEL(sa->flags)) ?
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			} },
			.crypto_xform = sa->xforms,
			.userdata = NULL,
		};

		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
						rte_cryptodev_get_sec_ctx(
						cdev_id);

			/* Set IPsec parameters in conf */
			set_ipsec_conf(sa, &(sess_conf.ipsec));

			ips->security.ses = rte_security_session_create(ctx,
					&sess_conf, skt_ctx->session_pool,
					skt_ctx->session_priv_pool);
			if (ips->security.ses == NULL) {
				RTE_LOG(ERR, IPSEC,
					"SEC Session init failed: err: %d\n", ret);
				return -1;
			}
			ips->security.ctx = ctx;
		} else {
			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
			return -1;
		}
	} else {
		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
			struct rte_cryptodev_info info;

			rte_cryptodev_info_get(cdev_id, &info);
			if (!(info.feature_flags &
					RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
				return -ENOTSUP;

		}
		ips->crypto.dev_id = cdev_id;
		ips->crypto.ses = rte_cryptodev_sym_session_create(
				skt_ctx->session_pool);
		rte_cryptodev_sym_session_init(cdev_id,
				ips->crypto.ses, sa->xforms,
				skt_ctx->session_priv_pool);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
	}

	return 0;
}

/*
 * Create an inline crypto/protocol session on the SA's port. For inline
 * crypto the rte_flow rule that redirects matching ESP traffic to the
 * session is installed here as well.
 */
int
create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	int32_t ret = 0;
	struct rte_security_ctx *sec_ctx;
	struct rte_security_session_conf sess_conf = {
		.action_type = ips->type,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = sa->spi,
			.salt = sa->salt,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction = sa->direction,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP
		} },
		.crypto_xform = sa->xforms,
		.userdata = NULL,
	};

	if (IS_TRANSPORT(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		}
	} else if (IS_TUNNEL(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		} else {
			RTE_LOG(ERR, IPSEC, "invalid tunnel type\n");
			return -1;
		}
	}

	if (sa->udp_encap) {
		sess_conf.ipsec.options.udp_encap = 1;
		sess_conf.ipsec.udp.sport = htons(sa->udp.sport);
		sess_conf.ipsec.udp.dport = htons(sa->udp.dport);
	}

	if (sa->esn > 0) {
		sess_conf.ipsec.options.esn = 1;
		sess_conf.ipsec.esn.value = sa->esn;
	}

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
		sa->spi, sa->portid);

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		struct rte_flow_error err;
		const struct rte_security_capability *sec_cap;
		int ret = 0;

		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(
				sa->portid);
		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				" rte_eth_dev_get_sec_ctx failed\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool,
				skt_ctx->session_priv_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);

		/* iterate until ESP tunnel */
		while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
			    sec_cap->protocol ==
				RTE_SECURITY_PROTOCOL_IPSEC &&
			    sec_cap->ipsec.mode ==
				RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
			    sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

		if (IS_IP6(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
			sa->pattern[1].spec = &sa->ipv6_spec;

			memcpy(sa->ipv6_spec.hdr.dst_addr,
				sa->dst.ip.ip6.ip6_b, 16);
			memcpy(sa->ipv6_spec.hdr.src_addr,
				sa->src.ip.ip6.ip6_b, 16);
		} else if (IS_IP4(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
			sa->pattern[1].spec = &sa->ipv4_spec;

			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		}

		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);

		if (sa->udp_encap) {
			sa->udp_spec.hdr.dst_port =
					rte_cpu_to_be_16(sa->udp.dport);
			sa->udp_spec.hdr.src_port =
					rte_cpu_to_be_16(sa->udp.sport);

			sa->pattern[2].mask = &rte_flow_item_udp_mask;
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
			sa->pattern[2].spec = &sa->udp_spec;
			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[3].spec = &sa->esp_spec;
			sa->pattern[3].mask = &rte_flow_item_esp_mask;

			sa->pattern[4].type = RTE_FLOW_ITEM_TYPE_END;
		} else {
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[2].spec = &sa->esp_spec;
			sa->pattern[2].mask = &rte_flow_item_esp_mask;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
		}

		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
		sa->action[0].conf = ips->security.ses;

		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

		sa->attr.egress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
		sa->attr.ingress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
		if (sa->attr.ingress) {
			uint8_t rss_key[64];
			struct rte_eth_rss_conf rss_conf = {
				.rss_key = rss_key,
				.rss_key_len = sizeof(rss_key),
			};
			struct rte_eth_dev_info dev_info;
			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
			struct rte_flow_action_rss action_rss;
			unsigned int i;
			unsigned int j;

			/* Don't create flow if default flow is created */
			if (flow_info_tbl[sa->portid].rx_def_flow)
				return 0;

			ret = rte_eth_dev_info_get(sa->portid, &dev_info);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"Error during getting device (port %u) info: %s\n",
					sa->portid, strerror(-ret));
				return ret;
			}

			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
			/* Try RSS. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
			sa->action[1].conf = &action_rss;
			ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
					&rss_conf);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"rte_eth_dev_rss_hash_conf_get:ret=%d\n",
					ret);
				return -1;
			}
			for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
				queue[j++] = i;

			action_rss = (struct rte_flow_action_rss){
					.types = rss_conf.rss_hf,
					.key_len = rss_conf.rss_key_len,
					.queue_num = j,
					.key = rss_key,
					.queue = queue,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
					sa->pattern, sa->action,
					&err);
			if (!ret)
				goto flow_create;
			/* Try Queue. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
			sa->action[1].conf =
				&(struct rte_flow_action_queue){
					.index = 0,
				};
			ret = rte_flow_validate(sa->portid, &sa->attr,
					sa->pattern, sa->action,
					&err);
			if (!ret)
				goto flow_create;
			/* Try End. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
			sa->action[1].conf = NULL;
			ret = rte_flow_validate(sa->portid, &sa->attr,
					sa->pattern, sa->action,
					&err);
			if (ret)
				goto flow_create_failure;
		} else if (sa->attr.egress &&
				(ips->security.ol_flags &
					RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
			sa->action[1].type =
					RTE_FLOW_ACTION_TYPE_PASSTHRU;
			sa->action[2].type =
					RTE_FLOW_ACTION_TYPE_END;
		}
flow_create:
		sa->flow = rte_flow_create(sa->portid,
				&sa->attr, sa->pattern, sa->action, &err);
		if (sa->flow == NULL) {
flow_create_failure:
			RTE_LOG(ERR, IPSEC,
				"Failed to create ipsec flow msg: %s\n",
				err.message);
			return -1;
		}
	} else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		const struct rte_security_capability *sec_cap;

		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(sa->portid);

		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"Ethernet device doesn't have security features registered\n");
			return -1;
		}

		/* Set IPsec parameters in conf */
		set_ipsec_conf(sa, &(sess_conf.ipsec));

		/* Save SA as userdata for the security session. When
		 * the packet is received, this userdata will be
		 * retrieved using the metadata from the packet.
		 *
		 * The PMD is expected to set similar metadata for other
		 * operations, like rte_eth_event, which are tied to
		 * security session. In such cases, the userdata could
		 * be obtained to uniquely identify the security
		 * parameters denoted.
		 */

		sess_conf.userdata = (void *) sa;

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool,
				skt_ctx->session_priv_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);
		if (sec_cap == NULL) {
			RTE_LOG(ERR, IPSEC,
				"No capabilities registered\n");
			return -1;
		}

		/* iterate until ESP tunnel */
		while (sec_cap->action !=
				RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
			    sec_cap->protocol ==
				RTE_SECURITY_PROTOCOL_IPSEC &&
			    sec_cap->ipsec.mode ==
				sess_conf.ipsec.mode &&
			    sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
	}

	return 0;
}

/*
 * Install a flow director rule that steers inbound ESP traffic for this SA
 * to its dedicated Rx queue (sa->fdir_qid).
 */
int
create_ipsec_esp_flow(struct ipsec_sa *sa)
{
	int ret = 0;
	struct rte_flow_error err = {};

	if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for Egress traffic\n");
		return -1;
	}
	if (sa->flags == TRANSPORT) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for transport mode\n");
		return -1;
	}
	sa->action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	sa->action[0].conf = &(struct rte_flow_action_queue) {
				.index = sa->fdir_qid,
	};
	sa->attr.egress = 0;
	sa->attr.ingress = 1;
	if (IS_IP6(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
		sa->pattern[1].spec = &sa->ipv6_spec;
		memcpy(sa->ipv6_spec.hdr.dst_addr,
			sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
		memcpy(sa->ipv6_spec.hdr.src_addr,
			sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	} else if (IS_IP4(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
		sa->pattern[1].spec = &sa->ipv4_spec;
		sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
		sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	}
	sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

	ret = rte_flow_validate(sa->portid, &sa->attr, sa->pattern, sa->action,
			&err);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
		return ret;
	}

	sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern,
			sa->action, &err);
	if (!sa->flow) {
		RTE_LOG(ERR, IPSEC, "Flow creation failed %s\n", err.message);
		return -1;
	}

	return 0;
}

/*
 * Enqueue the buffered crypto ops of a queue pair into the PMD queue.
 */
void
enqueue_cop_burst(struct cdev_qp *cqp)
{
	uint32_t i, len, ret;

	len = cqp->len;
	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
	if (ret < len) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
			" enqueued %u crypto ops out of %u\n",
			cqp->id, cqp->qp, ret, len);
		/* drop packets that we fail to enqueue */
		for (i = ret; i < len; i++)
			free_pkts(&cqp->buf[i]->sym->m_src, 1);
	}
	cqp->in_flight += ret;
	cqp->len = 0;
}

static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
	cqp->buf[cqp->len++] = cop;

	if (cqp->len == MAX_PKT_BURST)
		enqueue_cop_burst(cqp);
}

/*
 * Prepare a crypto operation for each packet and enqueue it to the SA's
 * queue pair, or hand the packet directly to the inline offload path.
 */
static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], void *sas[],
		uint16_t nb_pkts)
{
	int32_t ret = 0, i;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_sym_op *sym_cop;
	struct ipsec_sa *sa;
	struct rte_ipsec_session *ips;

	for (i = 0; i < nb_pkts; i++) {
		if (unlikely(sas[i] == NULL)) {
			free_pkts(&pkts[i], 1);
			continue;
		}

		rte_prefetch0(sas[i]);
		rte_prefetch0(pkts[i]);

		priv = get_priv(pkts[i]);
		sa = ipsec_mask_saptr(sas[i]);
		priv->sa = sa;
		ips = ipsec_get_primary_session(sa);

		switch (ips->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->security.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			/* Drop ESP-in-UDP packets received on an SA that is
			 * not configured for UDP encapsulation.
			 */
			if (unlikely((pkts[i]->packet_type &
					(RTE_PTYPE_TUNNEL_MASK |
					RTE_PTYPE_L4_MASK)) ==
					MBUF_PTYPE_TUNNEL_ESP_IN_UDP &&
					sa->udp_encap != 1)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			sym_cop = get_sym_cop(&priv->cop);
			sym_cop->m_src = pkts[i];

			rte_security_attach_session(&priv->cop,
				ips->security.ses);
			break;

		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by the"
					" legacy mode.\n");
			free_pkts(&pkts[i], 1);
			continue;

		case RTE_SECURITY_ACTION_TYPE_NONE:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->crypto.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			rte_crypto_op_attach_sym_session(&priv->cop,
					ips->crypto.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			RTE_ASSERT(ips->security.ses != NULL);
			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
					RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			RTE_ASSERT(ips->security.ses != NULL);
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);
			rte_security_attach_session(&priv->cop,
					ips->security.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
					RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		}

		enqueue_cop(sa->cqp[ipsec_ctx->lcore_id], &priv->cop);
	}
}

/*
 * Return packets that have already been processed by the inline/offload
 * path (collected in ipsec_ctx->ol_pkts by ipsec_enqueue()).
 */
static inline int32_t
ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts, ret;
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	nb_pkts = 0;
	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
		rte_prefetch0(pkt);
		priv = get_priv(pkt);
		sa = priv->sa;
		ret = xform_func(pkt, sa, &priv->cop);
		if (unlikely(ret)) {
			free_pkts(&pkt, 1);
			continue;
		}
		pkts[nb_pkts++] = pkt;
	}

	return nb_pkts;
}

/*
 * Poll the crypto device queue pairs for completed operations and
 * post-process the corresponding packets.
 */
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cops[max_pkts];
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
		struct cdev_qp *cqp;

		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

		if (cqp->in_flight == 0)
			continue;

		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
				cops, max_pkts - nb_pkts);

		cqp->in_flight -= nb_cops;

		for (j = 0; j < nb_cops; j++) {
			pkt = cops[j]->sym->m_src;
			rte_prefetch0(pkt);

			priv = get_priv(pkt);
			sa = priv->sa;

			RTE_ASSERT(sa != NULL);

			if (ipsec_get_action_type(sa) ==
					RTE_SECURITY_ACTION_TYPE_NONE) {
				ret = xform_func(pkt, sa, cops[j]);
				if (unlikely(ret)) {
					free_pkts(&pkt, 1);
					continue;
				}
			} else if (ipsec_get_action_type(sa) ==
					RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
				if (cops[j]->status) {
					free_pkts(&pkt, 1);
					continue;
				}
			}
			pkts[nb_pkts++] = pkt;
		}
	}

	/* return packets */
	return nb_pkts;
}

/*
 * Look up inbound SAs for the packets, queue them for IPsec processing and
 * return the packets already completed by the inline path.
 */
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
}

/* Retrieve inbound packets completed by the lookaside crypto devices. */
uint16_t
ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}

/*
 * Look up outbound SAs by index, queue the packets for IPsec processing and
 * return the packets already completed by the inline path.
 */
uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
}

/* Retrieve outbound packets completed by the lookaside crypto devices. */
uint16_t
ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}
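/*
 * Illustrative only, not part of the upstream example: a minimal sketch of
 * how a worker loop might drive the inbound path above. 'port_id',
 * 'queue_id' and 'ctx' are assumed to have been set up by the application;
 * MAX_PKT_BURST is the burst size used throughout this file.
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb_rx, nb_done;
 *
 *	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, MAX_PKT_BURST);
 *	// SA lookup + enqueue to crypto; returns inline-processed packets
 *	nb_done = ipsec_inbound(ctx, pkts, nb_rx, MAX_PKT_BURST);
 *	// ... route/forward the nb_done packets ...
 *	// later, drain completions from the lookaside crypto queue pairs
 *	nb_done = ipsec_inbound_cqp_dequeue(ctx, pkts, MAX_PKT_BURST);
 *	// ... route/forward these packets as well ...
 */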