/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_udp.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso);

/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
	sop->cipher.data.length = sa->ctp.cipher.length + plen;
	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
	sop->auth.data.length = sa->ctp.auth.length + plen;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}

/*
 * helper function to fill crypto_sym op for AEAD algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
	sop->aead.data.length = sa->ctp.cipher.length + plen;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}

/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
static inline void
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t algo;

	algo = sa->algo_type;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
	case ALGO_TYPE_NULL:
		/* NULL case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
		break;
	case ALGO_TYPE_AES_GMAC:
		/* GMAC case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		/* AEAD (AES_CCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
			sa->iv_ofs);
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		/* AEAD (CHACHA20_POLY) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
			struct aead_chacha20_poly1305_iv *,
			sa->iv_ofs);
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
			ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}

/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);

	if (!tso) {
		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
		/* pad length + esp tail */
		pdlen = clen - plen;
		tlen = pdlen + sa->icv_len + sqh_len;
	} else {
		/* We don't need to pad/align packet or append ICV length
		 * when using TSO offload
		 */
		pdlen = clen - plen;
		tlen = pdlen + sqh_len;
	}

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* if UDP encap is enabled update the dgram_len */
	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
			(ph + sa->hdr_len - sizeof(struct rte_udp_hdr));
		udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
				sa->hdr_len + sizeof(struct rte_udp_hdr));
	}

	/* update original and new ip header fields */
	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
			mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	RTE_ASSERT(pdlen <= sizeof(esp_pad_bytes));

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, RTE_MIN(pdlen, sizeof(esp_pad_bytes)));

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}
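
/*
 * Editorial note (worked example, not part of the original code):
 * for a tunnel-mode SA with pad_align = 4, icv_len = 16, sqh_len = 4 (ESN)
 * and a 100-byte protected payload (plen = 100), the math above gives:
 *	clen  = RTE_ALIGN_CEIL(100 + 2, 4) = 104   (bytes to encrypt)
 *	pdlen = 104 - 100 = 4                      (padding + ESP trailer)
 *	tlen  = 4 + 16 + 4 = 24                    (bytes appended to the tail)
 * and espt->pad_len = pdlen - sizeof(*espt) = 2.
 * The appended tail is laid out as:
 *	[pad bytes][ESP trailer][SQN.hi (sqh_len)][ICV (icv_len)][AAD scratch]
 * with icv->va pointing at the ICV; outb_pkt_xprepare() below fills the
 * SQN.hi and AAD areas for the lookaside-crypto case.
 */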

/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *gaad;
	struct aead_ccm_aad *caad;
	struct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill IV and AAD fields, if any (AAD is placed right after the ICV),
	 * for the supported AEAD algorithms: AES-GCM, AES-CCM and
	 * CHACHA20-POLY1305.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		if (sa->aad_len != 0) {
			gaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
			aead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_AES_CCM:
		if (sa->aad_len != 0) {
			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		if (sa->aad_len != 0) {
			chacha20_poly1305_aad =
				(struct aead_chacha20_poly1305_aad *)
				(icv->va + sa->icv_len);
			aead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,
				sa->spi, sqc, IS_ESN(sa));
		}
		break;
	default:
		break;
	}
}

/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
uint16_t
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
			sa->sqh_len, 0);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}
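
/*
 * Editorial sketch (illustrative only, not part of the library): for a
 * plain-cryptodev ("lookaside none") session the prepare functions above are
 * reached through the public rte_ipsec API, roughly:
 *
 *	struct rte_ipsec_session ss = { .sa = sa, ... };
 *	struct rte_crypto_op *cop[BURST];
 *	uint16_t k;
 *
 *	rte_ipsec_session_prepare(&ss);
 *	// ... allocate crypto ops into cop[] ...
 *	k = rte_ipsec_pkt_crypto_prepare(&ss, mb, cop, n);
 *	rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, k);
 *	// after dequeue, rte_ipsec_pkt_process() finalizes the packets
 *
 * where BURST, dev_id, qp_id, mb[] and n are application-supplied.
 */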

/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;
	uint32_t l2len, l3len;

	l2len = mb->l2_len;
	l3len = mb->l3_len;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);

	if (!tso) {
		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
		/* pad length + esp tail */
		pdlen = clen - plen;
		tlen = pdlen + sa->icv_len + sqh_len;
	} else {
		/* We don't need to pad/align packet or append ICV length
		 * when using TSO offload
		 */
		pdlen = clen - plen;
		tlen = pdlen + sqh_len;
	}

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
			l3len, IPPROTO_ESP);

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	RTE_ASSERT(pdlen <= sizeof(esp_pad_bytes));

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, RTE_MIN(pdlen, sizeof(esp_pad_bytes)));

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
uint16_t
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
			sa->sqh_len, 0);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}


static inline uint32_t
outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
	uint32_t plen, void *iv)
{
	uint64_t *ivp = iv;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t clen;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		gcm = iv;
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		ccm = iv;
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		chacha20_poly1305 = iv;
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
			ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		ctr = iv;
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}

	*pofs += sa->ctp.auth.offset;
	clen = plen + sa->ctp.auth.length;
	return clen;
}

static uint16_t
cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num,
		esp_outb_prepare_t prepare, uint32_t cofs_mask)
{
	int32_t rc;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	uint32_t i, k, n;
	uint32_t l2, l3;
	union sym_op_data icv;
	struct rte_crypto_va_iova_ptr iv[num];
	struct rte_crypto_va_iova_ptr aad[num];
	struct rte_crypto_va_iova_ptr dgst[num];
	uint32_t dr[num];
	uint32_t l4ofs[num];
	uint32_t clen[num];
	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	for (i = 0, k = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		/* calculate ESP header offset */
		l4ofs[k] = (l2 + l3) & cofs_mask;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(ivbuf[k], sqc);

		/* try to update the packet itself */
		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len, 0);

		/* success, proceed with preparations */
		if (rc >= 0) {

			outb_pkt_xprepare(sa, sqc, &icv);

			/* get encrypted data offset and length */
			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
				ivbuf[k]);

			/* fill iv, digest and aad */
			iv[k].va = ivbuf[k];
			aad[k].va = icv.va + sa->icv_len;
			dgst[k++].va = icv.va;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	/* convert mbufs to iovecs and do actual crypto/auth processing */
	if (k != 0)
		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
			l4ofs, clen, k);
	return k;
}

uint16_t
cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
}

uint16_t
cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
		UINT32_MAX);
}
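
/*
 * Editorial sketch (illustrative only, not part of the library): for an
 * RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO session the two wrappers above are
 * reached through rte_ipsec_pkt_cpu_prepare(), and the encryption/
 * authentication happens synchronously inside that call via cpu_crypto_bulk()
 * (no crypto ops, no enqueue/dequeue), roughly:
 *
 *	k = rte_ipsec_pkt_cpu_prepare(&ss, mb, n);
 *	k = rte_ipsec_pkt_process(&ss, mb, k);
 *
 * where ss, mb[] and n are application-supplied; the second call performs the
 * final fix-ups (e.g. esp_outb_sqh_process() below for ESN-enabled SAs).
 */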

/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hibits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv, bytes;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	uint32_t dr[num];

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;
	bytes = 0;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			/* remove high-order 32 bits of esn from packet len */
			mb[i]->pkt_len -= sa->sqh_len;
			ml->data_len -= sa->sqh_len;
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
			bytes += mb[i]->pkt_len;
			k++;
		} else
			dr[i - k] = i;
	}
	sa->statistics.count += k;
	sa->statistics.bytes += bytes;

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}

/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags, bytes;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	bytes = 0;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		bytes += mb[i]->pkt_len;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
	ss->sa->statistics.count += num;
	ss->sa->statistics.bytes += bytes;
}


static inline int
esn_outb_nb_segments(struct rte_mbuf *m)
{
	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
		uint16_t pkt_l3len = m->pkt_len - m->l2_len;
		uint16_t segments =
			(m->tso_segsz > 0 && pkt_l3len > m->tso_segsz) ?
			(pkt_l3len + m->tso_segsz - 1) / m->tso_segsz : 1;
		return segments;
	}
	return 1; /* no TSO */
}

/* Compute how many packets can be sent before overflow occurs */
static inline uint16_t
esn_outb_nb_valid_packets(uint16_t num, uint32_t n_sqn, uint16_t nb_segs[])
{
	uint16_t i;
	uint32_t seg_cnt = 0;
	for (i = 0; i < num && seg_cnt < n_sqn; i++)
		seg_cnt += nb_segs[i];
	return i - 1;
}

/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, nb_segs_total, n_sqn;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];
	uint16_t nb_segs[num];

	sa = ss->sa;
	nb_segs_total = 0;
	/* Calculate number of segments */
	for (i = 0; i != num; i++) {
		nb_segs[i] = esn_outb_nb_segments(mb[i]);
		nb_segs_total += nb_segs[i];
	}

	n_sqn = nb_segs_total;
	sqn = esn_outb_update_sqn(sa, &n_sqn);
	if (n_sqn != nb_segs_total) {
		rte_errno = EOVERFLOW;
		/* if there are segmented packets find out how many can be
		 * sent until overflow occurs
		 */
		if (nb_segs_total > num) /* there is at least 1 */
			num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
		else
			num = n_sqn; /* no segmented packets */
	}

	k = 0;
	for (i = 0; i != num; i++) {

		sqc = rte_cpu_to_be_64(sqn);
		gen_iv(iv, sqc);
		sqn += nb_segs[i];

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
			(mb[i]->ol_flags &
			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}
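
/*
 * Editorial sketch (illustrative only, not part of the library): for an
 * RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO session the processing functions in
 * this group are reached through rte_ipsec_pkt_process(), and the prepared
 * packets are then handed straight to the inline-capable ethdev:
 *
 *	k = rte_ipsec_pkt_process(&ss, mb, n);
 *	rte_eth_tx_burst(port_id, queue_id, mb, k);
 *
 * where ss, mb[], n, port_id and queue_id are application-supplied.
 */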

/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, nb_segs_total, n_sqn;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];
	uint16_t nb_segs[num];

	sa = ss->sa;
	nb_segs_total = 0;
	/* Calculate number of segments */
	for (i = 0; i != num; i++) {
		nb_segs[i] = esn_outb_nb_segments(mb[i]);
		nb_segs_total += nb_segs[i];
	}

	n_sqn = nb_segs_total;
	sqn = esn_outb_update_sqn(sa, &n_sqn);
	if (n_sqn != nb_segs_total) {
		rte_errno = EOVERFLOW;
		/* if there are segmented packets find out how many can be
		 * sent until overflow occurs
		 */
		if (nb_segs_total > num) /* there is at least 1 */
			num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
		else
			num = n_sqn; /* no segmented packets */
	}

	k = 0;
	for (i = 0; i != num; i++) {

		sqc = rte_cpu_to_be_64(sqn);
		gen_iv(iv, sqc);
		sqn += nb_segs[i];

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
			(mb[i]->ol_flags &
			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}

/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}