/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_udp.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso);

/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
	sop->cipher.data.length = sa->ctp.cipher.length + plen;
	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
	sop->auth.data.length = sa->ctp.auth.length + plen;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}

/*
 * helper function to fill crypto_sym op for AEAD algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
	sop->aead.data.length = sa->ctp.cipher.length + plen;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}
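
/*
 * Editor's sketch (inferred from the offsets above, not a normative
 * layout): both helpers take icv pointing at the ICV staging area in
 * the mbuf tailroom reserved by outb_*_pkt_prepare():
 *
 *	icv->va + 0            ICV (sa->icv_len bytes)
 *	icv->va + sa->icv_len  AAD staging area (sa->aad_len bytes)
 *
 * i.e. for AEAD transforms the per-packet AAD lives right behind the
 * ICV, which is why the tailroom checks below include sa->aad_len.
 */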

/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
static inline void
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t algo;

	algo = sa->algo_type;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
	case ALGO_TYPE_NULL:
		/* NULL case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
		break;
	case ALGO_TYPE_AES_GMAC:
		/* GMAC case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		/* AEAD (AES_CCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
			sa->iv_ofs);
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		/* AEAD (CHACHA20_POLY) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
			struct aead_chacha20_poly1305_iv *,
			sa->iv_ofs);
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
			ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}
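
/*
 * A minimal sketch of the IV fill above (editor's note; the exact
 * struct layout is assumed from crypto.h): sa->iv_ofs is a byte offset
 * from the start of the crypto op into its private area, so e.g. for
 * AES-GCM the sequence amounts to:
 *
 *	struct aead_gcm_iv *gcm = rte_crypto_op_ctod_offset(cop,
 *		struct aead_gcm_iv *, sa->iv_ofs);
 *	aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
 *	// writes the 4-byte salt, the 8-byte per-packet IV and an
 *	// initial block counter into the op's private area
 */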

/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);

	if (!tso) {
		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
		/* pad length + esp tail */
		pdlen = clen - plen;
		tlen = pdlen + sa->icv_len + sqh_len;
	} else {
		/* We don't need to pad/align packet or append ICV length
		 * when using TSO offload
		 */
		pdlen = clen - plen;
		tlen = pdlen + sqh_len;
	}

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* if UDP encap is enabled update the dgram_len */
	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
			(ph + sa->hdr_len - sizeof(struct rte_udp_hdr));
		udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
			sa->hdr_len + sizeof(struct rte_udp_hdr));
	}

	/* update original and new ip header fields */
	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
		mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	RTE_ASSERT(pdlen <= sizeof(esp_pad_bytes));

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, RTE_MIN(pdlen, sizeof(esp_pad_bytes)));

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
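
/*
 * Resulting tunnel-mode layout (editor's sketch derived from the
 * prepend/append logic above; SQN.hi is present only when sqh_len != 0,
 * the UDP header only for NAT-T):
 *
 * [sa->hdr: tunnel l2/l3(/udp)][ESP hdr][IV][original IP pkt]
 *	[pad][ESP tail][SQN.hi][ICV]
 *
 * The returned clen is the byte count to encrypt: the original packet
 * plus padding and the ESP trailer.
 */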

/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *gaad;
	struct aead_ccm_aad *caad;
	struct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill AAD fields, if any (AAD is placed right after the ICV),
	 * for the supported AEAD algorithms: AES-GCM, AES-CCM and
	 * CHACHA20-POLY1305.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		if (sa->aad_len != 0) {
			gaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
			aead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_AES_CCM:
		if (sa->aad_len != 0) {
			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		if (sa->aad_len != 0) {
			chacha20_poly1305_aad = (struct aead_chacha20_poly1305_aad *)
				(icv->va + sa->icv_len);
			aead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,
				sa->spi, sqc, IS_ESN(sa));
		}
		break;
	default:
		break;
	}
}
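
/*
 * Editor's note on the ESN trick above: sqn_low32(sqc) is what goes on
 * the wire in esph->seq, while sqn_hi32(sqc) is parked in the sqh_len
 * gap between the ESP trailer and the ICV so that the digest is
 * computed over the full 64-bit sequence number; esp_outb_sqh_process()
 * strips it again once the crypto op has completed.
 */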

/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
static inline uint16_t
esp_outb_tun_prepare_helper(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t n, uint64_t sqn)
{
	int32_t rc;
	uint32_t i, k;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[n];

	sa = ss->sa;
	cs = ss->crypto.ses;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
			sa->sqh_len, 0);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}
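
/*
 * Editor's note on the "death-row" pattern used here and below: dr[]
 * records indexes of packets that failed preparation, then
 * move_bad_mbufs() compacts mb[] so the k good packets come first and
 * the failed ones follow, preserving relative order, e.g.
 *
 *	in:  mb = {A, B(bad), C, D(bad)},  dr = {1, 3}, k = 2
 *	out: mb = {A, C, B, D},            return value 2
 */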

uint16_t
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	uint64_t sqn;
	uint32_t n;

	n = num;
	sqn = esn_outb_update_sqn(ss->sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	return esp_outb_tun_prepare_helper(ss, mb, cop, n, sqn);
}

uint16_t
esp_outb_tun_prepare_stateless(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num, struct rte_ipsec_state *state)
{
	uint64_t sqn = state->sqn;

	return esp_outb_tun_prepare_helper(ss, mb, cop, num, sqn);
}
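
/*
 * Usage sketch (illustrative only; drop_or_retry() is hypothetical):
 * lookaside-none callers pair the prepare step with a cryptodev
 * enqueue:
 *
 *	uint16_t k = esp_outb_tun_prepare(ss, mb, cop, num);
 *	if (k != num)
 *		drop_or_retry(mb + k, num - k);	// rte_errno is set
 *	rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, k);
 *
 * The _stateless variant differs only in taking the starting sequence
 * number from the caller-provided rte_ipsec_state instead of updating
 * the SA's own counter.
 */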

/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;
	uint32_t l2len, l3len;

	l2len = mb->l2_len;
	l3len = mb->l3_len;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);

	if (!tso) {
		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
		/* pad length + esp tail */
		pdlen = clen - plen;
		tlen = pdlen + sa->icv_len + sqh_len;
	} else {
		/* We don't need to pad/align packet or append ICV length
		 * when using TSO offload
		 */
		pdlen = clen - plen;
		tlen = pdlen + sqh_len;
	}

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
			l3len, IPPROTO_ESP);

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	RTE_ASSERT(pdlen <= sizeof(esp_pad_bytes));

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, RTE_MIN(pdlen, sizeof(esp_pad_bytes)));

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
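
/*
 * Resulting transport-mode layout (editor's sketch): insert_esph()
 * moves the original L2/L3 headers into the newly prepended area,
 * opening a gap of hlen bytes for the ESP header and IV:
 *
 * [l2][l3][ESP hdr][IV][L4 payload][pad][ESP tail][SQN.hi][ICV]
 *
 * with espt->next_proto set to the original L4 protocol returned by
 * update_trs_l3hdr().
 */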

/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
uint16_t
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
			sa->sqh_len, 0);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}
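
/*
 * Editor's note (hedged): esn_outb_update_sqn(sa, &n) reserves n
 * consecutive sequence numbers from the SA (atomically for MT-safe
 * SAs) and returns the first one; when fewer than n values remain
 * before SQN overflow it shrinks n, which is why callers compare n
 * against num and report EOVERFLOW for the surplus packets.
 */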

static inline uint32_t
outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
	uint32_t plen, void *iv)
{
	uint64_t *ivp = iv;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t clen;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		gcm = iv;
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		ccm = iv;
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		chacha20_poly1305 = iv;
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
			ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		ctr = iv;
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}

	*pofs += sa->ctp.auth.offset;
	clen = plen + sa->ctp.auth.length;
	return clen;
}

static inline uint16_t
cpu_outb_pkt_prepare_helper(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t n, esp_outb_prepare_t prepare,
	uint32_t cofs_mask, uint64_t sqn)
{
	int32_t rc;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	uint32_t i, k;
	uint32_t l2, l3;
	union sym_op_data icv;
	struct rte_crypto_va_iova_ptr iv[n];
	struct rte_crypto_va_iova_ptr aad[n];
	struct rte_crypto_va_iova_ptr dgst[n];
	uint32_t dr[n];
	uint32_t l4ofs[n];
	uint32_t clen[n];
	uint64_t ivbuf[n][IPSEC_MAX_IV_QWORD];

	sa = ss->sa;

	for (i = 0, k = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		/* calculate ESP header offset */
		l4ofs[k] = (l2 + l3) & cofs_mask;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(ivbuf[k], sqc);

		/* try to update the packet itself */
		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len, 0);

		/* success, proceed with preparations */
		if (rc >= 0) {

			outb_pkt_xprepare(sa, sqc, &icv);

			/* get encrypted data offset and length */
			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
				ivbuf[k]);

			/* fill iv, digest and aad */
			iv[k].va = ivbuf[k];
			aad[k].va = icv.va + sa->icv_len;
			dgst[k++].va = icv.va;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	/* convert mbufs to iovecs and do actual crypto/auth processing */
	if (k != 0)
		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
			l4ofs, clen, k);
	return k;
}

uint16_t
cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint64_t sqn;
	uint32_t n;

	n = num;
	sqn = esn_outb_update_sqn(ss->sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	return cpu_outb_pkt_prepare_helper(ss, mb, n, outb_tun_pkt_prepare, 0, sqn);
}

uint16_t
cpu_outb_tun_pkt_prepare_stateless(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num, struct rte_ipsec_state *state)
{
	uint64_t sqn = state->sqn;

	return cpu_outb_pkt_prepare_helper(ss, mb, num, outb_tun_pkt_prepare, 0, sqn);
}
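
/*
 * Usage sketch (illustrative; any rte_ipsec_state field beyond .sqn is
 * an assumption): the cpu-crypto path completes synchronously inside
 * cpu_crypto_bulk(), so there is no separate dequeue step:
 *
 *	struct rte_ipsec_state st = { .sqn = next_sqn };
 *	uint16_t k = cpu_outb_tun_pkt_prepare_stateless(ss, mb, num, &st);
 *	// the first k mbufs were processed in place; on failure
 *	// rte_errno is set and bad mbufs are moved past the good ones
 */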

uint16_t
cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint64_t sqn;
	uint32_t n;

	n = num;
	sqn = esn_outb_update_sqn(ss->sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	return cpu_outb_pkt_prepare_helper(ss, mb, n, outb_trs_pkt_prepare,
		UINT32_MAX, sqn);
}

/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hi bits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv, bytes;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	uint32_t dr[num];

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;
	bytes = 0;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			/* remove high-order 32 bits of esn from packet len */
			mb[i]->pkt_len -= sa->sqh_len;
			ml->data_len -= sa->sqh_len;
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
			bytes += mb[i]->pkt_len;
			k++;
		} else
			dr[i - k] = i;
	}
	sa->statistics.count += k;
	sa->statistics.bytes += bytes;

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}
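
/*
 * Editor's sketch of the fix-up above: preparation left the packet
 * tail as ...[ESP tail][SQN.hi][ICV] so the digest covers SQN.hi;
 * after crypto completes, remove_sqh() copies the ICV back over that
 * gap and the sqh_len bytes are trimmed:
 *
 *	before: ...[tail][SQN.hi][ICV]  pkt_len = P
 *	after:  ...[tail][ICV]          pkt_len = P - sa->sqh_len
 */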

/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags, bytes;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	bytes = 0;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		bytes += mb[i]->pkt_len;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
	ss->sa->statistics.count += num;
	ss->sa->statistics.bytes += bytes;
}

static inline int
esn_outb_nb_segments(struct rte_mbuf *m)
{
	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
		uint16_t pkt_l3len = m->pkt_len - m->l2_len;
		uint16_t segments =
			(m->tso_segsz > 0 && pkt_l3len > m->tso_segsz) ?
			(pkt_l3len + m->tso_segsz - 1) / m->tso_segsz : 1;
		return segments;
	}
	return 1; /* no TSO */
}

/* Compute how many packets can be sent before overflow occurs */
static inline uint16_t
esn_outb_nb_valid_packets(uint16_t num, uint32_t n_sqn, uint16_t nb_segs[])
{
	uint16_t i;
	uint32_t seg_cnt = 0;
	for (i = 0; i < num && seg_cnt < n_sqn; i++)
		seg_cnt += nb_segs[i];
	return i - 1;
}
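
/*
 * Worked example (editor's note): a TSO mbuf with pkt_len 9014,
 * l2_len 14 and tso_segsz 1500 gives pkt_l3len = 9000, so
 * esn_outb_nb_segments() returns (9000 + 1499) / 1500 = 6: the packet
 * consumes six sequence numbers, one per segment the HW will emit.
 */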

/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, nb_segs_total, n_sqn;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];
	uint16_t nb_segs[num];

	sa = ss->sa;
	nb_segs_total = 0;
	/* Calculate number of segments */
	for (i = 0; i != num; i++) {
		nb_segs[i] = esn_outb_nb_segments(mb[i]);
		nb_segs_total += nb_segs[i];
	}

	n_sqn = nb_segs_total;
	sqn = esn_outb_update_sqn(sa, &n_sqn);
	if (n_sqn != nb_segs_total) {
		rte_errno = EOVERFLOW;
		/* if there are segmented packets find out how many can be
		 * sent until overflow occurs
		 */
		if (nb_segs_total > num) /* there is at least 1 */
			num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
		else
			num = n_sqn; /* no segmented packets */
	}

	k = 0;
	for (i = 0; i != num; i++) {

		sqc = rte_cpu_to_be_64(sqn);
		gen_iv(iv, sqc);
		sqn += nb_segs[i];

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
			(mb[i]->ol_flags &
			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}
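
/*
 * Usage sketch (illustrative): with INLINE_CRYPTO the NIC encrypts on
 * transmit, so processing pairs directly with TX:
 *
 *	uint16_t k = inline_outb_tun_pkt_process(ss, mb, num);
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, mb, k);
 *	// mbufs beyond k failed preparation; beyond sent, TX was full
 */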

/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, nb_segs_total, n_sqn;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];
	uint16_t nb_segs[num];

	sa = ss->sa;
	nb_segs_total = 0;
	/* Calculate number of segments */
	for (i = 0; i != num; i++) {
		nb_segs[i] = esn_outb_nb_segments(mb[i]);
		nb_segs_total += nb_segs[i];
	}

	n_sqn = nb_segs_total;
	sqn = esn_outb_update_sqn(sa, &n_sqn);
	if (n_sqn != nb_segs_total) {
		rte_errno = EOVERFLOW;
		/* if there are segmented packets find out how many can be
		 * sent until overflow occurs
		 */
		if (nb_segs_total > num) /* there is at least 1 */
			num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
		else
			num = n_sqn; /* no segmented packets */
	}

	k = 0;
	for (i = 0; i != num; i++) {

		sqc = rte_cpu_to_be_64(sqn);
		gen_iv(iv, sqc);
		sqn += nb_segs[i];

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
			(mb[i]->ol_flags &
			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}

/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}