Lines Matching defs:pkt
32 process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
35 uint32_t ptype = pkt->packet_type;
37 eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
86 ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
89 vec->port = pkt->port;
92 if (vec->attr_valid && (vec->port != pkt->port))
234 ipv4_pkt_l3_len_set(struct rte_mbuf *pkt)
238 ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
239 pkt->l3_len = ipv4->ihl * 4;
243 ipv6_pkt_l3_len_set(struct rte_mbuf *pkt)
251 ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
253 l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK;
257 p = rte_pktmbuf_mtod(pkt, uint8_t *);
260 l3_len < pkt->data_len &&
265 /* Drop pkt when IPv6 header exceeds first seg size */
266 if (unlikely(l3_len > pkt->data_len))
269 pkt->l3_len = l3_len;
275 route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
283 dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
299 route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
305 ip = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *, RTE_ETHER_HDR_LEN);
318 get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
321 return route4_pkt(pkt, rt->rt4_ctx);
323 return route6_pkt(pkt, rt->rt6_ctx);
351 crypto_prepare_event(struct rte_mbuf *pkt, struct rte_ipsec_session *sess, struct rte_event *ev)
356 /* Get pkt private data */
357 priv = get_priv(pkt);
361 crypto_op_reset(sess, &pkt, &cop, 1);
381 event_crypto_enqueue(struct rte_mbuf *pkt,
391 crypto_prepare_event(pkt, sess, &ev);
397 /* pkt will be freed by the caller */
410 struct rte_mbuf *pkt;
416 /* Get pkt from event */
417 pkt = ev->mbuf;
418 if (is_ip_reassembly_incomplete(pkt) > 0) {
419 free_reassembly_fail_pkt(pkt);
424 type = process_ipsec_get_pkt_type(pkt, &nlp);
428 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
429 if (unlikely(pkt->ol_flags &
435 sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
446 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
447 if (unlikely(pkt->ol_flags &
453 sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
463 rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
464 ipv4_pkt_l3_len_set(pkt);
465 sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
472 if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
477 rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
478 if (unlikely(ipv6_pkt_l3_len_set(pkt) != 0))
480 sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
487 if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
516 port_id = get_route(pkt, rt, type);
524 update_mac_addrs(rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *), port_id);
527 ipsec_event_pre_forward(pkt, port_id);
531 free_pkts(&pkt, 1);
543 struct rte_mbuf *pkt;
550 /* Get pkt from event */
551 pkt = ev->mbuf;
554 type = process_ipsec_get_pkt_type(pkt, &nlp);
580 ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
583 port_id = get_route(pkt, rt, type);
612 sess->security.ses, pkt, NULL);
615 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
620 /* Get the port to which this pkt need to be submitted */
625 pkt->l2_len = RTE_ETHER_HDR_LEN;
631 ipsec_event_pre_forward(pkt, port_id);
635 /* prepare pkt - advance start to L3 */
636 rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
638 if (likely(event_crypto_enqueue(pkt, sa, ev_link) == 0))
643 free_pkts(&pkt, 1);
653 struct rte_mbuf *pkt;
659 pkt = t->ip4.pkts[i];
660 port_id = route4_pkt(pkt, rt->rt4_ctx);
663 ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
666 ipsec_event_pre_forward(pkt, port_id);
667 ev_vector_attr_update(vec, pkt);
668 vec->mbufs[j++] = pkt;
670 free_pkts(&pkt, 1);
675 pkt = t->ip6.pkts[i];
676 port_id = route6_pkt(pkt, rt->rt6_ctx);
679 ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
682 ipsec_event_pre_forward(pkt, port_id);
683 ev_vector_attr_update(vec, pkt);
684 vec->mbufs[j++] = pkt;
686 free_pkts(&pkt, 1);
701 struct rte_mbuf *pkt;
708 pkt = t->ipsec.pkts[i];
711 free_pkts(&pkt, 1);
715 crypto_prepare_event(pkt, sess, &events[ev_len]);
743 struct rte_mbuf *pkt;
755 pkt = t->ipsec.pkts[i];
757 free_pkts(&pkt, 1);
766 rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
767 crypto_prepare_event(pkt, sess, &events[ev_len]);
772 sess->security.ses, pkt, NULL);
773 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
777 ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
783 ipsec_event_pre_forward(pkt, port_id);
784 ev_vector_attr_update(vec, pkt);
785 vec->mbufs[j++] = pkt;
789 free_pkts(&pkt, 1);
809 classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
815 type = process_ipsec_get_pkt_type(pkt, &nlp);
820 t->ip4.pkts[(t->ip4.num)++] = pkt;
824 t->ip6.pkts[(t->ip6.num)++] = pkt;
827 rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
828 ipv4_pkt_l3_len_set(pkt);
829 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
832 rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
833 if (ipv6_pkt_l3_len_set(pkt) != 0) {
834 free_pkts(&pkt, 1);
837 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
842 free_pkts(&pkt, 1);
853 struct rte_mbuf *pkt;
861 /* Get pkt from event */
862 pkt = vec->mbufs[i];
863 if (is_ip_reassembly_incomplete(pkt) > 0) {
864 free_reassembly_fail_pkt(pkt);
868 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
869 if (unlikely(pkt->ol_flags &
873 free_pkts(&pkt, 1);
878 classify_pkt(pkt, &t);
896 struct rte_mbuf *pkt;
904 /* Get pkt from event */
905 pkt = vec->mbufs[i];
907 classify_pkt(pkt, &t);
910 pkt->l2_len = RTE_ETHER_HDR_LEN;
923 struct rte_mbuf *pkt;
929 pkt = vec->mbufs[i];
930 port_id = pkt->port;
933 free_pkts(&pkt, 1);
936 ipsec_event_pre_forward(pkt, port_id);
939 data[port_id].sess, pkt,
943 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
946 pkt->l2_len = RTE_ETHER_HDR_LEN;
948 vec->mbufs[j++] = pkt;
979 struct rte_mbuf *pkt;
982 pkt = vec->mbufs[0];
987 if (is_unprotected_port(pkt->port))
1012 struct rte_mbuf *pkt;
1015 pkt = vec->mbufs[0];
1017 vec->port = pkt->port;
1019 if (!is_unprotected_port(pkt->port))
1034 const struct rte_crypto_op *cop, struct rte_mbuf *pkt)
1043 free_pkts(&pkt, 1);
1047 ip = rte_pktmbuf_mtod(pkt, struct ip *);
1050 ethhdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
1052 /* Route pkt and update required fields */
1054 pkt->ol_flags |= lconf->outbound.ipv4_offloads;
1055 pkt->l3_len = sizeof(struct ip);
1056 pkt->l2_len = RTE_ETHER_HDR_LEN;
1060 port_id = route4_pkt(pkt, lconf->rt.rt4_ctx);
1062 pkt->ol_flags |= lconf->outbound.ipv6_offloads;
1063 pkt->l3_len = sizeof(struct ip6_hdr);
1064 pkt->l2_len = RTE_ETHER_HDR_LEN;
1068 port_id = route6_pkt(pkt, lconf->rt.rt6_ctx);
1073 free_pkts(&pkt, 1);
1081 ipsec_event_pre_forward(pkt, port_id);
1095 struct rte_mbuf *pkt;
1100 /* Transform cop vec into pkt vec */
1102 /* Get pkt data */
1104 pkt = cop->sym->m_src;
1105 if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
1108 vec->mbufs[n++] = pkt;
1109 ev_vector_attr_update(vec, pkt);
1135 struct rte_mbuf *pkt;
1137 /* Get pkt data */
1139 pkt = cop->sym->m_src;
1141 if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
1145 ev->mbuf = pkt;
1196 struct rte_mbuf *pkt;
1261 pkt = ev.mbuf;
1262 port_id = pkt->port;
1264 rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
1267 ipsec_event_pre_forward(pkt, port_id);
1272 rte_pktmbuf_free(pkt);
1278 data[port_id].sess, pkt,
1282 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
1285 pkt->l2_len = RTE_ETHER_HDR_LEN;
1403 /* The pkt has been dropped or posted */
1727 struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
1805 pkt = pkts[j];
1806 ptype = pkt->packet_type &
1813 send_single_packet(pkt, portid, proto);
1829 pkt = pkts[j];
1831 pkt->l2_len = RTE_ETHER_HDR_LEN;
1832 send_single_packet(pkt, sa_out_portid,
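
The matched lines above come from DPDK's ipsec-secgw example (the event-mode worker path) and repeat one pattern around `pkt`: classify the mbuf, strip the Ethernet header for IPsec-bound traffic, and set `pkt->l3_len` before SA lookup or crypto enqueue (lines 234-269, 463-465, 827-833). For orientation only, below is a minimal hedged sketch of that pattern; it is not ipsec-secgw code. The helper name `demo_set_l3_len` is illustrative, the IPv4 branch mirrors the `ihl * 4` computation at line 239 (the `ihl` bitfield assumes a recent DPDK release), and the IPv6 branch is simplified to the fixed 40-byte base header, whereas the real ipv6_pkt_l3_len_set() at lines 243-269 also walks extension headers and drops packets whose IPv6 header exceeds the first segment.

#include <stdint.h>

#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/*
 * Illustrative helper, NOT part of ipsec-secgw: strip the Ethernet header
 * and set mbuf->l3_len the way the matched ipv4_pkt_l3_len_set() /
 * ipv6_pkt_l3_len_set() call sites do before SA lookup or crypto enqueue.
 * Returns 0 on success, -1 if the packet should be dropped.
 */
static int
demo_set_l3_len(struct rte_mbuf *pkt)
{
	const struct rte_ether_hdr *eth;
	uint16_t ethertype;

	eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
	ethertype = rte_be_to_cpu_16(eth->ether_type);

	/* Advance the data start past the Ethernet header (cf. line 463). */
	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);

	if (ethertype == RTE_ETHER_TYPE_IPV4) {
		const struct rte_ipv4_hdr *ip4 =
			rte_pktmbuf_mtod(pkt, const struct rte_ipv4_hdr *);

		/* IHL is in 32-bit words (cf. line 239). */
		pkt->l3_len = ip4->ihl * 4;
		return 0;
	}

	if (ethertype == RTE_ETHER_TYPE_IPV6) {
		/*
		 * Simplification: fixed 40-byte base header only; extension
		 * headers are not walked here.
		 */
		if (sizeof(struct rte_ipv6_hdr) > pkt->data_len)
			return -1;
		pkt->l3_len = sizeof(struct rte_ipv6_hdr);
		return 0;
	}

	return -1;	/* Neither IPv4 nor IPv6: drop. */
}

One design point the listing itself shows: the real code keeps classification separate from L3-length setup (process_ipsec_get_pkt_type() at line 32 vs. the *_pkt_l3_len_set() helpers), and where the NIC has already parsed the packet it relies on pkt->packet_type (line 253) rather than re-reading the headers.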