Lines Matching defs:mb
19 const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
140 const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
154 l2len = mb->l2_len;
155 plen = mb->pkt_len - l2len;
174 ml = rte_pktmbuf_lastseg(mb);
179 ph = rte_pktmbuf_prepend(mb, hlen - l2len);
186 mb->pkt_len += tlen;
190 mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
200 udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
206 mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));
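The matches above (source lines 154-206) come from the tunnel-mode prepare path, where the mbuf is grown at both ends: tail room is reserved in the last segment for the ESP trailer and ICV, space for the outer header is prepended, and pkt_len is fixed up. Below is a hedged sketch of that mbuf-growth pattern using only the public rte_mbuf API; the function name and the hlen/tlen parameters are illustrative, not the library's own.

```c
/*
 * Hedged sketch of the mbuf growth pattern the matched lines exercise.
 * Names and parameters here are illustrative only.
 */
#include <errno.h>
#include <rte_mbuf.h>

static int
esp_grow_mbuf_sketch(struct rte_mbuf *mb, uint16_t hlen, uint16_t tlen)
{
	struct rte_mbuf *ml;
	char *ph, *pt;
	uint16_t pdofs;

	/* trailer and ICV go into the last segment */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* make room in front of the packet for the outer header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* extend the last segment by hand; pkt_len tracks the total length */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, char *, pdofs);

	/* real code would now build the header at ph and the trailer at pt */
	(void)pt;
	return 0;
}
```

In the listed source, the prepend length is hlen - l2len (the existing L2 header is reused) and mb->tx_offload is also rewritten from the SA; the sketch leaves those details out.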
292 esp_outb_tun_prepare_helper(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
314 rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
319 lksd_none_cop_prepare(cop[k], cs, mb[i]);
331 move_bad_mbufs(mb, dr, n, n - k);
337 esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
348 return esp_outb_tun_prepare_helper(ss, mb, cop, n, sqn);
352 esp_outb_tun_prepare_stateless(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
357 return esp_outb_tun_prepare_helper(ss, mb, cop, num, sqn);
365 const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
377 l2len = mb->l2_len;
378 l3len = mb->l3_len;
381 plen = mb->pkt_len - uhlen;
403 ml = rte_pktmbuf_lastseg(mb);
408 ph = rte_pktmbuf_prepend(mb, hlen);
415 mb->pkt_len += tlen;
422 np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
460 esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
484 l2 = mb[i]->l2_len;
485 l3 = mb[i]->l3_len;
491 rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
496 lksd_none_cop_prepare(cop[k], cs, mb[i]);
508 move_bad_mbufs(mb, dr, n, n - k);
552 struct rte_mbuf *mb[], uint16_t n, esp_outb_prepare_t prepare,
573 l2 = mb[i]->l2_len;
574 l3 = mb[i]->l3_len;
583 rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len, 0);
606 move_bad_mbufs(mb, dr, n, n - k);
610 cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
617 struct rte_mbuf *mb[], uint16_t num)
627 return cpu_outb_pkt_prepare_helper(ss, mb, n, outb_tun_pkt_prepare, 0, sqn);
632 struct rte_mbuf *mb[], uint16_t num, struct rte_ipsec_state *state)
636 return cpu_outb_pkt_prepare_helper(ss, mb, num, outb_tun_pkt_prepare, 0, sqn);
641 struct rte_mbuf *mb[], uint16_t num)
651 return cpu_outb_pkt_prepare_helper(ss, mb, n, outb_trs_pkt_prepare,
662 esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
677 if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
678 ml = rte_pktmbuf_lastseg(mb[i]);
680 mb[i]->pkt_len -= sa->sqh_len;
685 bytes += mb[i]->pkt_len;
697 move_bad_mbufs(mb, dr, num, num - k);
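The matches from source lines 662-697 belong to the post-crypto fix-up that removes the temporary sequence-number-high bytes appended for ICV computation. A minimal sketch of that trim follows, assuming the caller passes sqh_len; the function name is hypothetical, and the real routine additionally records failed packets for move_bad_mbufs() and handles the ICV copy.

```c
#include <rte_mbuf.h>

/* Hedged sketch: drop sqh_len trailing bytes from every packet the device
 * did not flag as failed, and count the resulting payload bytes. */
static uint32_t
esp_strip_sqh_sketch(struct rte_mbuf *mb[], uint16_t num, uint32_t sqh_len)
{
	uint16_t i;
	uint32_t bytes = 0;
	struct rte_mbuf *ml;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) != 0)
			continue;	/* the library collects these as bad mbufs */

		/* the appended bytes sit at the end of the last segment */
		ml = rte_pktmbuf_lastseg(mb[i]);
		ml->data_len -= sqh_len;
		mb[i]->pkt_len -= sqh_len;
		bytes += mb[i]->pkt_len;
	}
	return bytes;
}
```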
709 struct rte_mbuf *mb[], uint16_t num)
717 mb[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
718 bytes += mb[i]->pkt_len;
721 ss->security.ses, mb[i], NULL);
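Source lines 709-721 come from the inline-offload preparation, where each mbuf is flagged for TX security offload and per-packet session metadata is attached. The sketch below shows that per-packet loop; the ctx/ses/need_mdata parameters stand in for fields of the rte_ipsec_session and are assumptions here, as is the function name.

```c
#include <rte_mbuf.h>
#include <rte_security.h>

/* Hedged sketch: mark packets for inline security offload and, when the
 * device requires it, attach per-packet session metadata. */
static void
inline_mark_sketch(void *ctx, void *ses, int need_mdata,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint16_t i;

	for (i = 0; i != num; i++) {
		mb[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		if (need_mdata)
			rte_security_set_pkt_metadata(ctx, ses, mb[i], NULL);
	}
}
```

As the match at source line 718 shows, the library's loop also accumulates per-packet byte counts for the SA statistics, which the sketch omits.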
758 struct rte_mbuf *mb[], uint16_t num)
774 nb_segs[i] = esn_outb_nb_segments(mb[i]);
799 rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
800 (mb[i]->ol_flags &
814 move_bad_mbufs(mb, dr, num, num - k);
816 inline_outb_mbuf_prepare(ss, mb, k);
826 struct rte_mbuf *mb[], uint16_t num)
842 nb_segs[i] = esn_outb_nb_segments(mb[i]);
867 rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
868 (mb[i]->ol_flags &
882 move_bad_mbufs(mb, dr, num, num - k);
884 inline_outb_mbuf_prepare(ss, mb, k);
894 struct rte_mbuf *mb[], uint16_t num)
896 inline_outb_mbuf_prepare(ss, mb, num);