xref: /dpdk/lib/node/ethdev_rx.c (revision ae282b0611c33aa73a01ee6137d116155053b835)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>

#include "ethdev_rx_priv.h"
#include "node_private.h"

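/* Global list of per-node (port, queue) Rx configurations, registered
 * before graph creation and consumed by each node instance at init time.
 */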
static struct ethdev_rx_node_main ethdev_rx_main;

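/* Receive a burst of packets from the (port, queue) pair stored in the
 * node context and move the whole burst to the next node in the graph.
 * Returns the number of packets received.
 */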
static __rte_always_inline uint16_t
ethdev_rx_node_process_inline(struct rte_graph *graph, struct rte_node *node,
			      ethdev_rx_node_ctx_t *ctx)
{
	uint16_t count, next_index;
	uint16_t port, queue;

	port = ctx->port_id;
	queue = ctx->queue_id;
	next_index = ctx->cls_next;

	/* Get pkts from port */
	count = rte_eth_rx_burst(port, queue, (struct rte_mbuf **)node->objs,
				 RTE_GRAPH_BURST_SIZE);

	if (!count)
		return 0;
	node->idx = count;
	/* Enqueue to next node */
	rte_node_next_stream_move(graph, node, next_index);

	return count;
}

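/* Node process handler; objs/cnt are unused because ethdev_rx is a
 * source node that generates its own stream via rte_eth_rx_burst().
 */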
static __rte_always_inline uint16_t
ethdev_rx_node_process(struct rte_graph *graph, struct rte_node *node,
		       void **objs, uint16_t cnt)
{
	ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
	uint16_t n_pkts = 0;

	RTE_SET_USED(objs);
	RTE_SET_USED(cnt);

	n_pkts = ethdev_rx_node_process_inline(graph, node, ctx);
	return n_pkts;
}

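/* Replace the L3 portion of a packet type based on the Ethernet type
 * field, which is big-endian on the wire (hence the rte_cpu_to_be_16()
 * comparisons).
 */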
static inline uint32_t
l3_ptype(uint16_t etype, uint32_t ptype)
{
	ptype = ptype & ~RTE_PTYPE_L3_MASK;
	if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
		ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
		ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	return ptype;
}

/* Callback for soft ptype parsing */
static uint16_t
eth_pkt_parse_cb(uint16_t port, uint16_t queue, struct rte_mbuf **mbufs,
		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	struct rte_ether_hdr *eth_hdr;
	uint16_t etype, n_left;
	struct rte_mbuf **pkts;

	RTE_SET_USED(port);
	RTE_SET_USED(queue);
	RTE_SET_USED(max_pkts);
	RTE_SET_USED(user_param);

	pkts = mbufs;
	n_left = nb_pkts;
	while (n_left >= 12) {
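		/*
		 * Unrolled by four: parse pkts[0..3] now while prefetching
		 * the header data of pkts[4..7] and the mbuf structures of
		 * pkts[8..11], hence the 12-packet window.
		 */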

		/* Prefetch next-next mbufs */
		rte_prefetch0(pkts[8]);
		rte_prefetch0(pkts[9]);
		rte_prefetch0(pkts[10]);
		rte_prefetch0(pkts[11]);

		/* Prefetch next mbuf data */
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[4], struct rte_ether_hdr *));
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[5], struct rte_ether_hdr *));
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[6], struct rte_ether_hdr *));
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[7], struct rte_ether_hdr *));

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];
		pkts += 4;
		n_left -= 4;

		/* Extract ptype of mbuf0 */
		eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf0->packet_type = l3_ptype(etype, 0);

		/* Extract ptype of mbuf1 */
		eth_hdr = rte_pktmbuf_mtod(mbuf1, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf1->packet_type = l3_ptype(etype, 0);

		/* Extract ptype of mbuf2 */
		eth_hdr = rte_pktmbuf_mtod(mbuf2, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf2->packet_type = l3_ptype(etype, 0);

		/* Extract ptype of mbuf3 */
		eth_hdr = rte_pktmbuf_mtod(mbuf3, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf3->packet_type = l3_ptype(etype, 0);
	}

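	/* Parse any remaining packets one at a time */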
	while (n_left > 0) {
		mbuf0 = pkts[0];

		pkts += 1;
		n_left -= 1;

		/* Extract ptype of mbuf0 */
		eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf0->packet_type = l3_ptype(etype, 0);
	}

	return nb_pkts;
}

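/* Check whether the port can report IPv4/IPv6 L3 ptypes in hardware; if
 * not, install eth_pkt_parse_cb() so mbuf->packet_type gets filled in
 * software on every Rx burst.
 */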
#define MAX_PTYPES 16
static int
ethdev_ptype_setup(uint16_t port, uint16_t queue)
{
	uint8_t l3_ipv4 = 0, l3_ipv6 = 0;
	uint32_t ptypes[MAX_PTYPES];
	int i, rc;

	/* Check IPv4 & IPv6 ptype support */
	rc = rte_eth_dev_get_supported_ptypes(port, RTE_PTYPE_L3_MASK, ptypes,
					      MAX_PTYPES);
	for (i = 0; i < rc; i++) {
		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
			l3_ipv4 = 1;
		if (ptypes[i] & RTE_PTYPE_L3_IPV6)
			l3_ipv6 = 1;
	}

	if (!l3_ipv4 || !l3_ipv6) {
		node_info("ethdev_rx",
			  "Enabling ptype callback for required ptypes on port %u",
			  port);

		if (!rte_eth_add_rx_callback(port, queue, eth_pkt_parse_cb,
					     NULL)) {
			node_err("ethdev_rx",
				 "Failed to add rx ptype cb: port=%d, queue=%d",
				 port, queue);
			return -EINVAL;
		}
	}

	return 0;
}

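/* Node init: copy the (port, queue) configuration registered for this
 * node id into the node context, then set up ptype parsing.
 */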
static int
ethdev_rx_node_init(const struct rte_graph *graph, struct rte_node *node)
{
	ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
	ethdev_rx_node_elem_t *elem = ethdev_rx_main.head;

	RTE_SET_USED(graph);

	while (elem) {
		if (elem->nid == node->id) {
			/* Update node specific context */
			memcpy(ctx, &elem->ctx, sizeof(ethdev_rx_node_ctx_t));
			break;
		}
		elem = elem->next;
	}

	RTE_VERIFY(elem != NULL);

	/* Check and setup ptype */
	return ethdev_ptype_setup(ctx->port_id, ctx->queue_id);
}

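/* Expose the global configuration list to the node control path */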
struct ethdev_rx_node_main *
ethdev_rx_get_node_data_get(void)
{
	return &ethdev_rx_main;
}

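/* Source node: the graph walker calls .process without input objects */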
static struct rte_node_register ethdev_rx_node_base = {
	.process = ethdev_rx_node_process,
	.flags = RTE_NODE_SOURCE_F,
	.name = "ethdev_rx",

	.init = ethdev_rx_node_init,

	.nb_edges = ETHDEV_RX_NEXT_MAX,
	.next_nodes = {
		[ETHDEV_RX_NEXT_PKT_CLS] = "pkt_cls",
		[ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup",
		[ETHDEV_RX_NEXT_IP4_REASSEMBLY] = "ip4_reassembly",
	},
};

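/* Return the base node registration, e.g. for per-(port, queue) clones */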
struct rte_node_register *
ethdev_rx_node_get(void)
{
	return &ethdev_rx_node_base;
}

RTE_NODE_REGISTER(ethdev_rx_node_base);