/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"

/* Fill the IPsec xform of a security session conf from the SA:
 * tunnel endpoints (for tunnel mode), replay window, ESN and
 * UDP-encapsulation options.
 */
static inline void
set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
{
	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		struct rte_security_ipsec_tunnel_param *tunnel =
				&ipsec->tunnel;
		if (IS_IP4_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;
			tunnel->ipv4.ttl = IPDEFTTL;

			memcpy((uint8_t *)&tunnel->ipv4.src_ip,
				(uint8_t *)&sa->src.ip.ip4, 4);

			memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
				(uint8_t *)&sa->dst.ip.ip4, 4);
		} else if (IS_IP6_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			tunnel->ipv6.hlimit = IPDEFTTL;
			tunnel->ipv6.dscp = 0;
			tunnel->ipv6.flabel = 0;

			memcpy((uint8_t *)&tunnel->ipv6.src_addr,
				(uint8_t *)&sa->src.ip.ip6.ip6_b, 16);

			memcpy((uint8_t *)&tunnel->ipv6.dst_addr,
				(uint8_t *)&sa->dst.ip.ip6.ip6_b, 16);
		}
		/* TODO support for Transport */
	}
	ipsec->replay_win_sz = app_sa_prm.window_size;
	ipsec->options.esn = app_sa_prm.enable_esn;
	ipsec->options.udp_encap = sa->udp_encap;
}

/* Create a lookaside session for the SA on the cryptodev queue pair
 * that cdev_map assigns to this lcore/algorithm combination: a
 * rte_security session for lookaside-protocol offload, or a plain
 * cryptodev symmetric session otherwise.
 */
int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	struct rte_cryptodev_info cdev_info;
	unsigned long cdev_id_qp = 0;
	int32_t ret = 0;
	struct cdev_key key = { 0 };

	key.lcore_id = (uint8_t)rte_lcore_id();

	key.cipher_algo = (uint8_t)sa->cipher_algo;
	key.auth_algo = (uint8_t)sa->auth_algo;
	key.aead_algo = (uint8_t)sa->aead_algo;

	ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
			(void **)&cdev_id_qp);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC,
			"No cryptodev: core %u, cipher_algo %u, "
			"auth_algo %u, aead_algo %u\n",
			key.lcore_id,
			key.cipher_algo,
			key.auth_algo,
			key.aead_algo);
		return -1;
	}

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
			"%u qp %u\n", sa->spi,
			ipsec_ctx->tbl[cdev_id_qp].id,
			ipsec_ctx->tbl[cdev_id_qp].qp);

	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
		ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		struct rte_security_session_conf sess_conf = {
			.action_type = ips->type,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.spi = sa->spi,
				.salt = sa->salt,
				.options = { 0 },
				.replay_win_sz = 0,
				.direction = sa->direction,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = (IS_TUNNEL(sa->flags)) ?
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			} },
			.crypto_xform = sa->xforms,
			.userdata = NULL,
		};

		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
						rte_cryptodev_get_sec_ctx(
						ipsec_ctx->tbl[cdev_id_qp].id);

			/* Set IPsec parameters in conf */
			set_ipsec_conf(sa, &(sess_conf.ipsec));

			ips->security.ses = rte_security_session_create(ctx,
					&sess_conf, ipsec_ctx->session_pool,
					ipsec_ctx->session_priv_pool);
			if (ips->security.ses == NULL) {
				RTE_LOG(ERR, IPSEC,
					"SEC Session init failed: err: %d\n", ret);
				return -1;
			}
		} else {
			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
			return -1;
		}
	} else {
		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
			struct rte_cryptodev_info info;
			uint16_t cdev_id;

			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
			rte_cryptodev_info_get(cdev_id, &info);
			if (!(info.feature_flags &
				RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
				return -ENOTSUP;

			ips->crypto.dev_id = cdev_id;
		}
		ips->crypto.ses = rte_cryptodev_sym_session_create(
				ipsec_ctx->session_pool);
		rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
				ips->crypto.ses, sa->xforms,
				ipsec_ctx->session_priv_pool);

		rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
				&cdev_info);
	}

	sa->cdev_id_qp = cdev_id_qp;

	return 0;
}

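/* Create an inline session for the SA on its ethernet port, for either
 * RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO or _INLINE_PROTOCOL. The
 * action type comes from the "type" token of the SA rule; a hedged
 * sketch of the ipsec-secgw config syntax (see the sample cfg files
 * shipped with the app for the authoritative format):
 *
 *	sa out 5 aead_algo aes-128-gcm aead_key de:ad:be:ef:... \
 *	mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 \
 *	port_id 1 type inline-crypto-offload
 *
 * For inline crypto an rte_flow rule is also built below, so that
 * matching ingress ESP traffic is handed to the security session.
 */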
int
create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	int32_t ret = 0;
	struct rte_security_ctx *sec_ctx;
	struct rte_security_session_conf sess_conf = {
		.action_type = ips->type,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = sa->spi,
			.salt = sa->salt,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction = sa->direction,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP
		} },
		.crypto_xform = sa->xforms,
		.userdata = NULL,
	};

	if (IS_TRANSPORT(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		}
	} else if (IS_TUNNEL(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		} else {
			RTE_LOG(ERR, IPSEC, "invalid tunnel type\n");
			return -1;
		}
	}

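	/* UDP encapsulation of ESP (RFC 3948): when enabled on the SA,
	 * the device adds/strips a UDP header around ESP using the
	 * configured ports (typically 4500).
	 */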
	if (sa->udp_encap) {
		sess_conf.ipsec.options.udp_encap = 1;
		sess_conf.ipsec.udp.sport = htons(sa->udp.sport);
		sess_conf.ipsec.udp.dport = htons(sa->udp.dport);
	}

	if (sa->esn > 0) {
		sess_conf.ipsec.options.esn = 1;
		sess_conf.ipsec.esn.value = sa->esn;
	}

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
		sa->spi, sa->portid);

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		struct rte_flow_error err;
		const struct rte_security_capability *sec_cap;
		int ret = 0;

		sec_ctx = (struct rte_security_ctx *)
					rte_eth_dev_get_sec_ctx(
					sa->portid);
		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				" rte_eth_dev_get_sec_ctx failed\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool,
				skt_ctx->session_priv_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);

		/* iterate until ESP tunnel */
		while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
					sec_cap->protocol ==
						RTE_SECURITY_PROTOCOL_IPSEC &&
					sec_cap->ipsec.mode ==
						RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
					sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

		if (IS_IP6(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
			sa->pattern[1].spec = &sa->ipv6_spec;

			memcpy(sa->ipv6_spec.hdr.dst_addr,
				sa->dst.ip.ip6.ip6_b, 16);
			memcpy(sa->ipv6_spec.hdr.src_addr,
				sa->src.ip.ip6.ip6_b, 16);
		} else if (IS_IP4(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
			sa->pattern[1].spec = &sa->ipv4_spec;

			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		}

		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);

		if (sa->udp_encap) {
			sa->udp_spec.hdr.dst_port =
					rte_cpu_to_be_16(sa->udp.dport);
			sa->udp_spec.hdr.src_port =
					rte_cpu_to_be_16(sa->udp.sport);

			sa->pattern[2].mask = &rte_flow_item_udp_mask;
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
			sa->pattern[2].spec = &sa->udp_spec;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[3].spec = &sa->esp_spec;
			sa->pattern[3].mask = &rte_flow_item_esp_mask;

			sa->pattern[4].type = RTE_FLOW_ITEM_TYPE_END;
		} else {
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[2].spec = &sa->esp_spec;
			sa->pattern[2].mask = &rte_flow_item_esp_mask;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
		}

		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
		sa->action[0].conf = ips->security.ses;

		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

		sa->attr.egress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
		sa->attr.ingress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
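		/* For ingress, probe progressively simpler action lists:
		 * SECURITY + RSS (spread across all Rx queues), then
		 * SECURITY + QUEUE (pin to queue 0), then SECURITY alone,
		 * and create the first variant the port validates.
		 */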
		if (sa->attr.ingress) {
			uint8_t rss_key[64];
			struct rte_eth_rss_conf rss_conf = {
				.rss_key = rss_key,
				.rss_key_len = sizeof(rss_key),
			};
			struct rte_eth_dev_info dev_info;
			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
			struct rte_flow_action_rss action_rss;
			unsigned int i;
			unsigned int j;

			/* Don't create flow if default flow is created */
			if (flow_info_tbl[sa->portid].rx_def_flow)
				return 0;

			ret = rte_eth_dev_info_get(sa->portid, &dev_info);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"Error during getting device (port %u) info: %s\n",
					sa->portid, strerror(-ret));
				return ret;
			}

			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
			/* Try RSS. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
			sa->action[1].conf = &action_rss;
			ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
					&rss_conf);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"rte_eth_dev_rss_hash_conf_get:ret=%d\n",
					ret);
				return -1;
			}
			for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
				queue[j++] = i;

			action_rss = (struct rte_flow_action_rss){
					.types = rss_conf.rss_hf,
					.key_len = rss_conf.rss_key_len,
					.queue_num = j,
					.key = rss_key,
					.queue = queue,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (!ret)
				goto flow_create;
			/* Try Queue. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
			sa->action[1].conf =
				&(struct rte_flow_action_queue){
				.index = 0,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			/* Queue validation result was previously discarded;
			 * use it before falling back to the bare action.
			 */
			if (!ret)
				goto flow_create;
			/* Try End. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
			sa->action[1].conf = NULL;
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (ret)
				goto flow_create_failure;
		} else if (sa->attr.egress &&
				(ips->security.ol_flags &
					RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
			sa->action[1].type =
					RTE_FLOW_ACTION_TYPE_PASSTHRU;
			sa->action[2].type =
					RTE_FLOW_ACTION_TYPE_END;
		}
flow_create:
		sa->flow = rte_flow_create(sa->portid,
				&sa->attr, sa->pattern, sa->action, &err);
		if (sa->flow == NULL) {
flow_create_failure:
			RTE_LOG(ERR, IPSEC,
				"Failed to create ipsec flow msg: %s\n",
				err.message);
			return -1;
		}
	} else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		const struct rte_security_capability *sec_cap;

		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(sa->portid);

		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"Ethernet device doesn't have security features registered\n");
			return -1;
		}

		/* Set IPsec parameters in conf */
		set_ipsec_conf(sa, &(sess_conf.ipsec));

		/* Save SA as userdata for the security session. When
		 * the packet is received, this userdata will be
		 * retrieved using the metadata from the packet.
		 *
		 * The PMD is expected to set similar metadata for other
		 * operations, like rte_eth_event, which are tied to
		 * security session. In such cases, the userdata could
		 * be obtained to uniquely identify the security
		 * parameters denoted.
		 */
		sess_conf.userdata = (void *) sa;

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool,
				skt_ctx->session_priv_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);
		if (sec_cap == NULL) {
			RTE_LOG(ERR, IPSEC,
				"No capabilities registered\n");
			return -1;
		}

		/* iterate until ESP tunnel */
		while (sec_cap->action !=
				RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
					sec_cap->protocol ==
						RTE_SECURITY_PROTOCOL_IPSEC &&
					sec_cap->ipsec.mode ==
						sess_conf.ipsec.mode &&
					sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
	}

	return 0;
}

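/* Install an ingress rte_flow rule steering this SA's ESP packets
 * (matched on the outer IP addresses and the SPI) to the flow-director
 * queue sa->fdir_qid. Only inbound tunnel-mode SAs are supported.
 */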
int
create_ipsec_esp_flow(struct ipsec_sa *sa)
{
	int ret = 0;
	struct rte_flow_error err;

	if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for Egress traffic\n");
		return -1;
	}
	if (sa->flags == TRANSPORT) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for transport mode\n");
		return -1;
	}
	sa->action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	sa->action[0].conf = &(struct rte_flow_action_queue) {
			.index = sa->fdir_qid,
	};
	sa->attr.egress = 0;
	sa->attr.ingress = 1;
	if (IS_IP6(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
		sa->pattern[1].spec = &sa->ipv6_spec;
		memcpy(sa->ipv6_spec.hdr.dst_addr,
			sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
		memcpy(sa->ipv6_spec.hdr.src_addr,
			sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	} else if (IS_IP4(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
		sa->pattern[1].spec = &sa->ipv4_spec;
		sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
		sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	}
	sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

	ret = rte_flow_validate(sa->portid, &sa->attr, sa->pattern, sa->action,
			&err);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
		return ret;
	}

	sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern,
			sa->action, &err);
	if (!sa->flow) {
		RTE_LOG(ERR, IPSEC, "Flow creation failed %s\n", err.message);
		return -1;
	}

	return 0;
}

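/* Crypto ops are buffered per cryptodev queue pair and pushed to the
 * PMD in bursts. A hedged sketch of how a caller flushes a partially
 * filled buffer (e.g. from a periodic drain loop; "cqp" naming as in
 * this file):
 *
 *	if (cqp->len != 0)
 *		enqueue_cop_burst(cqp);
 */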
/*
 * queue crypto-ops into PMD queue.
 */
void
enqueue_cop_burst(struct cdev_qp *cqp)
{
	uint32_t i, len, ret;

	len = cqp->len;
	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
	if (ret < len) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
			" enqueued %u crypto ops out of %u\n",
			cqp->id, cqp->qp, ret, len);
		/* drop packets that we fail to enqueue */
		for (i = ret; i < len; i++)
			free_pkts(&cqp->buf[i]->sym->m_src, 1);
	}
	cqp->in_flight += ret;
	cqp->len = 0;
}

static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
	cqp->buf[cqp->len++] = cop;

	if (cqp->len == MAX_PKT_BURST)
		enqueue_cop_burst(cqp);
}

/* Prepare one crypto op per packet according to the primary session
 * type of its SA, then either queue it to a cryptodev queue pair or,
 * for inline sessions, stage the packet on ol_pkts for direct Tx.
 */
static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], void *sas[],
		uint16_t nb_pkts)
{
	int32_t ret = 0, i;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_sym_op *sym_cop;
	struct ipsec_sa *sa;
	struct rte_ipsec_session *ips;

	for (i = 0; i < nb_pkts; i++) {
		if (unlikely(sas[i] == NULL)) {
			free_pkts(&pkts[i], 1);
			continue;
		}

		rte_prefetch0(sas[i]);
		rte_prefetch0(pkts[i]);

		priv = get_priv(pkts[i]);
		sa = ipsec_mask_saptr(sas[i]);
		priv->sa = sa;
		ips = ipsec_get_primary_session(sa);

		switch (ips->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if ((unlikely(ips->security.ses == NULL)) &&
				create_lookaside_session(ipsec_ctx, sa, ips)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			if (unlikely((pkts[i]->packet_type &
					(RTE_PTYPE_TUNNEL_MASK |
					RTE_PTYPE_L4_MASK)) ==
					MBUF_PTYPE_TUNNEL_ESP_IN_UDP &&
					sa->udp_encap != 1)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			sym_cop = get_sym_cop(&priv->cop);
			sym_cop->m_src = pkts[i];

			rte_security_attach_session(&priv->cop,
				ips->security.ses);
			break;

		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by the"
					" legacy mode.");
			free_pkts(&pkts[i], 1);
			continue;

		case RTE_SECURITY_ACTION_TYPE_NONE:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if ((unlikely(ips->crypto.ses == NULL)) &&
				create_lookaside_session(ipsec_ctx, sa, ips)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			rte_crypto_op_attach_sym_session(&priv->cop,
					ips->crypto.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}
			break;

		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			RTE_ASSERT(ips->security.ses != NULL);
			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;

		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			RTE_ASSERT(ips->security.ses != NULL);
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);
			rte_security_attach_session(&priv->cop,
				ips->security.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		}

		RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
		enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
	}
}

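/* Drain packets staged on ol_pkts by the inline paths above, applying
 * the post-processing transform to each before handing them back.
 */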
static inline int32_t
ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts, ret;
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	nb_pkts = 0;
	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
		rte_prefetch0(pkt);
		priv = get_priv(pkt);
		sa = priv->sa;
		ret = xform_func(pkt, sa, &priv->cop);
		if (unlikely(ret)) {
			free_pkts(&pkt, 1);
			continue;
		}
		pkts[nb_pkts++] = pkt;
	}

	return nb_pkts;
}

/* Poll the crypto queue pairs round-robin, starting after the one
 * visited last time, and post-process completed crypto ops until
 * max_pkts packets have been collected.
 */
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cops[max_pkts];
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
		struct cdev_qp *cqp;

		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

		if (cqp->in_flight == 0)
			continue;

		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
				cops, max_pkts - nb_pkts);

		cqp->in_flight -= nb_cops;

		for (j = 0; j < nb_cops; j++) {
			pkt = cops[j]->sym->m_src;
			rte_prefetch0(pkt);

			priv = get_priv(pkt);
			sa = priv->sa;

			RTE_ASSERT(sa != NULL);

			if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_NONE) {
				ret = xform_func(pkt, sa, cops[j]);
				if (unlikely(ret)) {
					free_pkts(&pkt, 1);
					continue;
				}
			} else if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
				if (cops[j]->status) {
					free_pkts(&pkt, 1);
					continue;
				}
			}
			pkts[nb_pkts++] = pkt;
		}
	}

	/* return packets */
	return nb_pkts;
}

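/* Public entry points for the legacy (poll-mode) data path. A hedged
 * sketch of the calling pattern, following the main worker loop of
 * ipsec-secgw (names abridged, not verbatim):
 *
 *	nb = ipsec_inbound(ipsec_ctx, traffic.ipsec.pkts,
 *			traffic.ipsec.num, MAX_PKT_BURST);
 *	... then, when draining the crypto queue pairs:
 *	nb = ipsec_inbound_cqp_dequeue(ipsec_ctx, traffic.ipsec.pkts,
 *			MAX_PKT_BURST);
 */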
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
}

uint16_t
ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}

uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
}

uint16_t
ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}