/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_cryptodev.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

/* helper routine to free a bulk of crypto-ops and related packets */
static inline void
free_cops(struct rte_crypto_op *cop[], uint32_t n)
{
	uint32_t i;

	for (i = 0; i != n; i++)
		free_pkts(&cop[i]->sym->m_src, 1);
}

/* helper routine to enqueue a bulk of crypto ops */
static inline void
enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, k, len, n;

	len = cqp->len;

	/*
	 * if cqp is empty and we have enough ops,
	 * then queue them to the PMD straight away.
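	 * (RTE_DIM(cqp->buf) * 3 / 4 serves as the "enough ops" threshold:
	 * a burst of at least 3/4 of the staging buffer size bypasses the
	 * buffer and is handed to rte_cryptodev_enqueue_burst() directly.)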
	 */
	if (num >= RTE_DIM(cqp->buf) * 3 / 4 && len == 0) {
		n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cop, num);
		cqp->in_flight += n;
		free_cops(cop + n, num - n);
		return;
	}

	k = 0;

	do {
		n = RTE_DIM(cqp->buf) - len;
		n = RTE_MIN(num - k, n);

		/* put packets into cqp */
		for (i = 0; i != n; i++)
			cqp->buf[len + i] = cop[k + i];

		len += n;
		k += n;

		/* if cqp is full, then enqueue crypto-ops to the PMD */
		if (len == RTE_DIM(cqp->buf)) {
			n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
					cqp->buf, len);
			cqp->in_flight += n;
			free_cops(cqp->buf + n, len - n);
			len = 0;
		}

	} while (k != num);

	cqp->len = len;
}

static inline int
check_ipsec_session(const struct rte_ipsec_session *ss)
{
	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
		ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		if (ss->crypto.ses == NULL)
			return -ENOENT;
	} else if (ss->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
		if (ss->security.ses == NULL)
			return -ENOENT;
	} else
		RTE_ASSERT(0);
	return 0;
}

/*
 * group input packets by the SA they belong to.
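 * Note that grouping is positional: a new group is started whenever the
 * SA pointer changes, so packets that share an SA but are not adjacent
 * in pkts[] end up in separate groups.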
 */
static uint32_t
sa_group(void *sa_ptr[], struct rte_mbuf *pkts[],
	struct rte_ipsec_group grp[], uint32_t num)
{
	uint32_t i, n, spi;
	void *sa;
	void * const nosa = &spi;

	sa = nosa;
	grp[0].m = pkts;
	for (i = 0, n = 0; i != num; i++) {

		if (sa != sa_ptr[i]) {
			grp[n].cnt = pkts + i - grp[n].m;
			n += (sa != nosa);
			grp[n].id.ptr = sa_ptr[i];
			grp[n].m = pkts + i;
			sa = sa_ptr[i];
		}
	}

	/* terminate last group */
	if (sa != nosa) {
		grp[n].cnt = pkts + i - grp[n].m;
		n++;
	}

	return n;
}

/*
 * helper function, splits processed packets into ipv4/ipv6 traffic.
 */
static inline void
copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
	uint32_t num)
{
	uint32_t j, ofs, s;
	struct traffic_type *out;

	/*
	 * determine traffic type (ipv4/ipv6) and offset for ACL classification
	 * based on the SA type
	 */
	if ((satp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
		if ((satp & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
			out = &trf->ip4;
			ofs = offsetof(struct ip, ip_p);
		} else {
			out = &trf->ip6;
			ofs = offsetof(struct ip6_hdr, ip6_nxt);
		}
	} else if (SATP_OUT_IPV4(satp)) {
		out = &trf->ip4;
		ofs = offsetof(struct ip, ip_p);
	} else {
		out = &trf->ip6;
		ofs = offsetof(struct ip6_hdr, ip6_nxt);
	}

	for (j = 0, s = out->num; j != num; j++) {
		out->data[s + j] = rte_pktmbuf_mtod_offset(mb[j],
				void *, ofs);
		out->pkts[s + j] = mb[j];
	}

	out->num += num;
}

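/*
 * prepare crypto ops for a group of packets that share the same lookaside
 * session and stage them on this lcore's crypto-dev queue pair.
 */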
static uint32_t
ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips, struct rte_mbuf **m,
		unsigned int cnt)
{
	struct cdev_qp *cqp;
	struct rte_crypto_op *cop[cnt];
	uint32_t j, k;
	struct ipsec_mbuf_metadata *priv;

	cqp = sa->cqp[ctx->lcore_id];

	/* in this app each mbuf has its own crypto op */
	for (j = 0; j != cnt; j++) {
		priv = get_priv(m[j]);
		cop[j] = &priv->cop;
		/*
		 * this is done just to satisfy inbound_sa_check()
		 * and should be removed in the future.
		 */
		priv->sa = sa;
	}

	/* prepare and enqueue crypto ops */
	k = rte_ipsec_pkt_crypto_prepare(ips, m, cop, cnt);
	if (k != 0)
		enqueue_cop_bulk(cqp, cop, k);

	return k;
}

/*
 * finish processing of packets successfully decrypted by an inline processor
 */
static uint32_t
ipsec_process_inline_group(struct rte_ipsec_session *ips, void *sa,
	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint64_t satp;
	uint32_t k;

	/* get SA type */
	satp = rte_ipsec_sa_type(ips->sa);
	prep_process_group(sa, mb, cnt);

	k = rte_ipsec_pkt_process(ips, mb, cnt);
	copy_to_trf(trf, satp, mb, k);
	return k;
}

/*
 * process packets synchronously
 */
static uint32_t
ipsec_process_cpu_group(struct rte_ipsec_session *ips, void *sa,
	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint64_t satp;
	uint32_t k;

	/* get SA type */
	satp = rte_ipsec_sa_type(ips->sa);
	prep_process_group(sa, mb, cnt);

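	/*
	 * with RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO the crypto transformation
	 * is performed synchronously on this lcore by
	 * rte_ipsec_pkt_cpu_prepare(), so there is no crypto-dev
	 * enqueue/dequeue round trip for this path.
	 */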
	k = rte_ipsec_pkt_cpu_prepare(ips, mb, cnt);
	k = rte_ipsec_pkt_process(ips, mb, k);
	copy_to_trf(trf, satp, mb, k);
	return k;
}

/*
 * Process ipsec packets.
 * If a packet belongs to an SA that is subject to inline-crypto,
 * then process it immediately.
 * Otherwise do the necessary preparations and queue it to the related
 * crypto-dev queue.
 */
void
ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint32_t i, k, n;
	struct ipsec_sa *sa;
	struct rte_ipsec_group *pg;
	struct rte_ipsec_session *ips;
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);

	for (i = 0; i != n; i++) {

		pg = grp + i;
		sa = ipsec_mask_saptr(pg->id.ptr);

		/* fall back to cryptodev for RX packets which the inline
		 * processor was unable to process
		 */
		if (sa != NULL)
			ips = (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ?
				ipsec_get_fallback_session(sa) :
				ipsec_get_primary_session(sa);

		/* no valid HW session for that SA */
		if (sa == NULL || unlikely(check_ipsec_session(ips) != 0))
			k = 0;

		/* process the group according to its session type */
		else {
			switch (ips->type) {
			/* enqueue packets to crypto dev */
			case RTE_SECURITY_ACTION_TYPE_NONE:
			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
				k = ipsec_prepare_crypto_group(ctx, sa, ips,
					pg->m, pg->cnt);
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
				k = ipsec_process_inline_group(ips, sa,
					trf, pg->m, pg->cnt);
				break;
			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
				k = ipsec_process_cpu_group(ips, sa,
					trf, pg->m, pg->cnt);
				break;
			default:
				k = 0;
			}
		}

		/* drop packets that could not be enqueued/processed */
		if (k != pg->cnt)
			free_pkts(pg->m + k, pg->cnt - k);
	}
}

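/*
 * helpers to drain completed crypto-ops from the crypto-dev queue pairs
 * owned by this ipsec_ctx. ctx_dequeue() starts polling at ctx->last_qp
 * and wraps around, so that over successive calls all queue pairs get
 * served even when a single burst fills up early.
 */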
static inline uint32_t
cqp_dequeue(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t n;

	if (cqp->in_flight == 0)
		return 0;

	n = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cop, num);
	RTE_ASSERT(cqp->in_flight >= n);
	cqp->in_flight -= n;

	return n;
}

static inline uint32_t
ctx_dequeue(struct ipsec_ctx *ctx, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, n;

	n = 0;

	for (i = ctx->last_qp; n != num && i != ctx->nb_qps; i++)
		n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

	for (i = 0; n != num && i != ctx->last_qp; i++)
		n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

	ctx->last_qp = i;
	return n;
}

/*
 * dequeue packets from crypto-dev queues and finalize processing.
 */
void
ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint64_t satp;
	uint32_t i, k, n, ng;
	struct rte_ipsec_session *ss;
	struct traffic_type *out;
	struct rte_ipsec_group *pg;
	const int nb_cops = RTE_DIM(trf->ipsec.pkts);
	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	trf->ip4.num = 0;
	trf->ip6.num = 0;

	out = &trf->ipsec;

	/* dequeue completed crypto-ops */
	n = ctx_dequeue(ctx, cop, RTE_MIN(MAX_PKT_BURST, nb_cops));
	if (n == 0)
		return;

	/* group them by ipsec session */
	ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)
		(uintptr_t)cop, out->pkts, grp, n);

	/* process each group of packets */
	for (i = 0; i != ng; i++) {

		pg = grp + i;
		ss = pg->id.ptr;
		satp = rte_ipsec_sa_type(ss->sa);

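		/*
		 * rte_ipsec_pkt_process() finalizes IPsec processing for the
		 * group: packets that failed crypto or IPsec checks are moved
		 * to the tail of pg->m, and k is the number of good packets
		 * left at the head.
		 */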
		k = rte_ipsec_pkt_process(ss, pg->m, pg->cnt);
		copy_to_trf(trf, satp, pg->m, k);

		/* free bad packets, if any */
		free_pkts(pg->m + k, pg->cnt - k);

		n -= pg->cnt;
	}

	/* we should never have a packet with an unknown SA here */
	RTE_VERIFY(n == 0);
}