xref: /dpdk/examples/l3fwd/l3fwd_acl_scalar.h (revision aa7c6077c19bd39b48ac17cd844b91f0dd03319f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2022 Intel Corporation
3  */
4 
5 #ifndef L3FWD_ACL_SCALAR_H
6 #define L3FWD_ACL_SCALAR_H
7 
8 #include "l3fwd.h"
9 #if defined RTE_ARCH_X86
10 #include "l3fwd_sse.h"
11 #elif defined __ARM_NEON
12 #include "l3fwd_neon.h"
13 #elif defined RTE_ARCH_PPC_64
14 #include "l3fwd_altivec.h"
15 #else
16 #include "l3fwd_common.h"
17 #endif
18 /*
19  * If the machine has SSE, NEON or PPC 64 then multiple packets
20  * can be sent at once if not only single packets will be sent.
21  */
22 #if defined RTE_ARCH_X86 || defined __ARM_NEON || defined RTE_ARCH_PPC_64
23 #define ACL_SEND_MULTI
24 #endif
25 
26 #define TYPE_NONE	0
27 #define TYPE_IPV4	1
28 #define TYPE_IPV6	2
29 
/*
 * Per-burst scratch state for an ACL classification pass: packets from one
 * RX burst are partitioned by L3 type so IPv4 and IPv6 lookups can each be
 * batched into a single rte_acl_classify() call.
 */
struct acl_search_t {

	uint32_t num_ipv4;	/* number of entries filled in data_ipv4[] */
	uint32_t num_ipv6;	/* number of entries filled in data_ipv6[] */

	/* per-burst-slot classification: TYPE_IPV4, TYPE_IPV6 or TYPE_NONE */
	uint8_t types[MAX_PKT_BURST];

	/* pointers to the start of ACL match data inside each IPv4 packet */
	const uint8_t *data_ipv4[MAX_PKT_BURST];
	uint32_t res_ipv4[MAX_PKT_BURST];	/* ACL results for IPv4 packets */

	/* pointers to the start of ACL match data inside each IPv6 packet */
	const uint8_t *data_ipv6[MAX_PKT_BURST];
	uint32_t res_ipv6[MAX_PKT_BURST];	/* ACL results for IPv6 packets */
};
43 
44 static inline void
45 l3fwd_acl_prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
46 	int index)
47 {
48 	struct rte_mbuf *pkt = pkts_in[index];
49 
50 	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
51 		/* Fill acl structure */
52 		acl->data_ipv4[acl->num_ipv4++] = MBUF_IPV4_2PROTO(pkt);
53 		acl->types[index] = TYPE_IPV4;
54 
55 	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
56 		/* Fill acl structure */
57 		acl->data_ipv6[acl->num_ipv6++] = MBUF_IPV6_2PROTO(pkt);
58 		acl->types[index] = TYPE_IPV6;
59 	} else {
60 		/* Unknown type, will drop the packet */
61 		acl->types[index] = TYPE_NONE;
62 	}
63 }
64 
65 static inline void
66 l3fwd_acl_prepare_acl_parameter(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
67 	int nb_rx)
68 {
69 	int i;
70 
71 	acl->num_ipv4 = 0;
72 	acl->num_ipv6 = 0;
73 
74 	/* Prefetch first packets */
75 	for (i = 0; i < PREFETCH_OFFSET && i < nb_rx; i++) {
76 		rte_prefetch0(rte_pktmbuf_mtod(
77 				pkts_in[i], void *));
78 	}
79 
80 	for (i = 0; i < (nb_rx - PREFETCH_OFFSET); i++) {
81 		rte_prefetch0(rte_pktmbuf_mtod(pkts_in[
82 				i + PREFETCH_OFFSET], void *));
83 		l3fwd_acl_prepare_one_packet(pkts_in, acl, i);
84 	}
85 
86 	/* Process left packets */
87 	for (; i < nb_rx; i++)
88 		l3fwd_acl_prepare_one_packet(pkts_in, acl, i);
89 }
90 
91 static inline void
92 send_packets_single(struct lcore_conf *qconf, struct rte_mbuf *pkts[], uint16_t hops[],
93 	uint32_t nb_tx)
94 {
95 	uint32_t j;
96 	struct rte_ether_hdr *eth_hdr;
97 
98 	for (j = 0; j < nb_tx; j++) {
99 		/* Run rfc1812 if packet is ipv4 and checks enabled. */
100 		rfc1812_process((struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(
101 						pkts[j], struct rte_ether_hdr *) + 1),
102 						&hops[j], pkts[j]->packet_type);
103 
104 		/* Set MAC addresses. */
105 		eth_hdr = rte_pktmbuf_mtod(pkts[j], struct rte_ether_hdr *);
106 		if (hops[j] != BAD_PORT) {
107 			*(uint64_t *)&eth_hdr->dst_addr = dest_eth_addr[hops[j]];
108 			rte_ether_addr_copy(&ports_eth_addr[hops[j]],
109 							&eth_hdr->src_addr);
110 			send_single_packet(qconf, pkts[j], hops[j]);
111 		} else
112 			rte_pktmbuf_free(pkts[j]);
113 	}
114 }
115 
116 #endif /* L3FWD_ACL_SCALAR_H */
117