/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len);

/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
	sop->cipher.data.length = sa->ctp.cipher.length + plen;
	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
	sop->auth.data.length = sa->ctp.auth.length + plen;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}

/*
 * helper function to fill crypto_sym op for AEAD algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
	sop->aead.data.length = sa->ctp.cipher.length + plen;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}

/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
static inline void
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t algo;

	algo = sa->algo_type;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
	case ALGO_TYPE_NULL:
		/* NULL case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
		break;
	case ALGO_TYPE_AES_GMAC:
		/* GMAC case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		/* AEAD (AES_CCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
			sa->iv_ofs);
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		/* AEAD (CHACHA20_POLY) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
			struct aead_chacha20_poly1305_iv *,
			sa->iv_ofs);
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
			ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}
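
/*
 * For reference, a rough sketch (derived from the code below, assuming the
 * usual tunnel setup) of how outb_tun_pkt_prepare() lays out the outbound
 * mbuf:
 *
 *   [ outer hdr from sa->hdr (incl. UDP hdr for NAT-T) ]
 *   [ ESP hdr: SPI, SQN.low ][ IV ][ original IP packet ]
 *   [ padding ][ pad_len ][ next_proto ]
 *   [ optional SQN.high (sqh_len) ][ room for ICV (icv_len) ]
 *
 * The returned value (clen) covers only the part to be encrypted:
 * original packet + padding + ESP trailer.
 */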

/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);
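
	/*
	 * sa->hdr is the outer header template built at SA setup time;
	 * with UDP encapsulation (NAT-T) it is expected to already carry
	 * the UDP header at its tail, so the ESP header starts at
	 * ph + sa->hdr_len and the UDP header immediately precedes it.
	 */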
	/* if UDP encap is enabled update the dgram_len */
	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
			(ph + sa->hdr_len - sizeof(struct rte_udp_hdr));
		udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
			sa->hdr_len + sizeof(struct rte_udp_hdr));
	}

	/* update original and new ip header fields */
	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
		mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *gaad;
	struct aead_ccm_aad *caad;
	struct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill IV and AAD fields, if any (aad fields are placed after icv),
	 * right now we support the AES-GCM, AES-CCM and CHACHA20-POLY1305
	 * AEAD algorithms.
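	 * For these transforms the AAD written below is the ESP header
	 * contents: the SPI plus either the 32-bit SQN, or the full
	 * 64-bit SQN when ESN is in use (see the *_aad_fill() helpers).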
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		if (sa->aad_len != 0) {
			gaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
			aead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_AES_CCM:
		if (sa->aad_len != 0) {
			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		if (sa->aad_len != 0) {
			chacha20_poly1305_aad =
				(struct aead_chacha20_poly1305_aad *)
				(icv->va + sa->icv_len);
			aead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,
				sa->spi, sqc, IS_ESN(sa));
		}
		break;
	default:
		break;
	}
}

/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
uint16_t
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
			sa->sqh_len);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}
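
/*
 * A minimal usage sketch for the lookaside (RTE_SECURITY_ACTION_TYPE_NONE)
 * path above; ss, dev_id, qp, n and BURST are illustrative names only and
 * not part of this library:
 *
 *	struct rte_mbuf *mb[BURST];
 *	struct rte_crypto_op *cop[BURST];
 *	uint16_t k;
 *
 *	k = rte_ipsec_pkt_crypto_prepare(&ss, mb, cop, n);
 *	k = rte_cryptodev_enqueue_burst(dev_id, qp, cop, k);
 *	... dequeue completed crypto ops ...
 *	k = rte_ipsec_pkt_process(&ss, mb, k);
 */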

/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;
	uint32_t l2len, l3len;

	l2len = mb->l2_len;
	l3len = mb->l3_len;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
		l3len, IPPROTO_ESP);
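
	/*
	 * At this point the mbuf has roughly been reshaped into:
	 *   [ l2 ][ l3 ][ ESP hdr ][ IV ][ original l4 + payload ]
	 *   [ padding ][ pad_len ][ next_proto ]
	 *   [ optional SQN.high (sqh_len) ][ room for ICV ]
	 * insert_esph() above shifted the original L2/L3 headers forward
	 * to open room for the ESP header and IV; the ESP fields and the
	 * trailer are filled in below.
	 */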

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
uint16_t
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
			sa->sqh_len);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}


static inline uint32_t
outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
	uint32_t plen, void *iv)
{
	uint64_t *ivp = iv;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t clen;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		gcm = iv;
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		ccm = iv;
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		chacha20_poly1305 = iv;
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
			ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		ctr = iv;
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}

	*pofs += sa->ctp.auth.offset;
	clen = plen + sa->ctp.auth.length;
	return clen;
}
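
/*
 * The cpu_outb_*() helpers below serve the synchronous
 * RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO path: no crypto ops are enqueued,
 * cpu_crypto_bulk() instead drives the cryptodev synchronous API directly.
 * A rough usage sketch (ss, mb and n are illustrative names only):
 *
 *	k = rte_ipsec_pkt_cpu_prepare(&ss, mb, n);
 *	k = rte_ipsec_pkt_process(&ss, mb, k);
 */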

static uint16_t
cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num,
	esp_outb_prepare_t prepare, uint32_t cofs_mask)
{
	int32_t rc;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	uint32_t i, k, n;
	uint32_t l2, l3;
	union sym_op_data icv;
	struct rte_crypto_va_iova_ptr iv[num];
	struct rte_crypto_va_iova_ptr aad[num];
	struct rte_crypto_va_iova_ptr dgst[num];
	uint32_t dr[num];
	uint32_t l4ofs[num];
	uint32_t clen[num];
	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	for (i = 0, k = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		/* calculate ESP header offset */
		l4ofs[k] = (l2 + l3) & cofs_mask;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(ivbuf[k], sqc);

		/* try to update the packet itself */
		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);

		/* success, proceed with preparations */
		if (rc >= 0) {

			outb_pkt_xprepare(sa, sqc, &icv);

			/* get encrypted data offset and length */
			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
				ivbuf[k]);

			/* fill iv, digest and aad */
			iv[k].va = ivbuf[k];
			aad[k].va = icv.va + sa->icv_len;
			dgst[k++].va = icv.va;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	/* convert mbufs to iovecs and do actual crypto/auth processing */
	if (k != 0)
		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
			l4ofs, clen, k);
	return k;
}

uint16_t
cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
}

uint16_t
cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
		UINT32_MAX);
}
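
/*
 * For reference, a sketch of the ESN/ICV handling undone below: when the
 * digest must cover SQN.high, the prepare stage above reserves sqh_len
 * extra bytes before the ICV and outb_pkt_xprepare() writes SQN.high
 * there, so the ICV is computed over [payload|padding|trailer|SQN.high].
 * Once the ICV is ready, those sqh_len bytes are dropped again and the
 * ICV is moved back into its place.
 */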

/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hibits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv, bytes;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	uint32_t dr[num];

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;
	bytes = 0;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			/* remove high-order 32 bits of esn from packet len */
			mb[i]->pkt_len -= sa->sqh_len;
			ml->data_len -= sa->sqh_len;
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
			bytes += mb[i]->pkt_len;
			k++;
		} else
			dr[i - k] = i;
	}
	sa->statistics.count += k;
	sa->statistics.bytes += bytes;

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}

/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags, bytes;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	bytes = 0;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		bytes += mb[i]->pkt_len;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
	ss->sa->statistics.count += num;
	ss->sa->statistics.bytes += bytes;
}
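
/*
 * Note on the inline paths below: with RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO
 * the actual ESP encryption is performed by the NIC on transmit, so after
 * rte_ipsec_pkt_process() the application simply sends the packets with
 * rte_eth_tx_burst(); no cryptodev enqueue/dequeue round trip is involved.
 */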

/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}

/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}

/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}