/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell.
 */

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>

#include "pkt_cls_priv.h"
#include "node_private.h"

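/*
 * pkt_cls node: inspect the (L2|L3) packet type of each mbuf and steer
 * it to the next node given by the p_nxt[] table below. To keep the
 * fast path cheap, a whole burst is speculatively sent to the next node
 * of the previously seen l2/l3 type, and only mispredicted packets are
 * diverted individually.
 */
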
/* Next node for each l2/l3 ptype; the masked (L2|L3) type fits in one
 * byte, hence 256 entries. The default of '0' maps to "pkt_drop".
 */
static const uint8_t p_nxt[256] __rte_cache_aligned = {
	[RTE_PTYPE_L3_IPV4] = PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4_EXT] = PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4_EXT_UNKNOWN] = PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER] =
		PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L2_ETHER] =
		PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L2_ETHER] =
		PKT_CLS_NEXT_IP4_LOOKUP,
};

static uint16_t
pkt_cls_node_process(struct rte_graph *graph, struct rte_node *node,
		     void **objs, uint16_t nb_objs)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
	uint8_t l0, l1, l2, l3, last_type;
	uint16_t next_index, n_left_from;
	uint16_t held = 0, last_spec = 0;
	struct pkt_cls_node_ctx *ctx;
	void **to_next, **from;
	uint32_t i;

	pkts = (struct rte_mbuf **)objs;
	from = objs;
	n_left_from = nb_objs;
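
	/* Prefetch the remaining cache lines of the object pointer array */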
	for (i = OBJS_PER_CLINE; i < RTE_GRAPH_BURST_SIZE; i += OBJS_PER_CLINE)
		rte_prefetch0(&objs[i]);

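	/* Prefetch the first four mbufs ahead of the classification loop */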
#if RTE_GRAPH_BURST_SIZE > 64
	for (i = 0; i < 4 && i < n_left_from; i++)
		rte_prefetch0(pkts[i]);
#endif

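	/* Speculate the next node from the l2/l3 type seen on the last call */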
	ctx = (struct pkt_cls_node_ctx *)node->ctx;
	last_type = ctx->l2l3_type;
	next_index = p_nxt[last_type];

	/* Get stream for the speculated next node */
	to_next = rte_node_next_stream_get(graph, node,
					   next_index, nb_objs);
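	/* Unrolled loop: classify four packets per iteration */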
	while (n_left_from >= 4) {
#if RTE_GRAPH_BURST_SIZE > 64
		if (likely(n_left_from > 7)) {
			rte_prefetch0(pkts[4]);
			rte_prefetch0(pkts[5]);
			rte_prefetch0(pkts[6]);
			rte_prefetch0(pkts[7]);
		}
#endif

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];
		pkts += 4;
		n_left_from -= 4;

		l0 = mbuf0->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
		l1 = mbuf1->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
		l2 = mbuf2->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
		l3 = mbuf3->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);

		/* Check if all four packets are destined to the same next
		 * node based on their l2/l3 packet type; fix_spec is
		 * non-zero when any of them breaks the speculation.
		 */
		uint8_t fix_spec = (last_type ^ l0) | (last_type ^ l1) |
			(last_type ^ l2) | (last_type ^ l3);

		if (unlikely(fix_spec)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from,
				   last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

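			/* Enqueue the four packets individually: keep those
			 * that match the speculated next node in the current
			 * stream, divert the rest via their own next edge.
			 */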
			/* l0 */
			if (p_nxt[l0] == next_index) {
				to_next[0] = from[0];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node,
						    p_nxt[l0], from[0]);
			}

			/* l1 */
			if (p_nxt[l1] == next_index) {
				to_next[0] = from[1];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node,
						    p_nxt[l1], from[1]);
			}

			/* l2 */
			if (p_nxt[l2] == next_index) {
				to_next[0] = from[2];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node,
						    p_nxt[l2], from[2]);
			}

			/* l3 */
			if (p_nxt[l3] == next_index) {
				to_next[0] = from[3];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node,
						    p_nxt[l3], from[3]);
			}

			/* Update the speculated ptype: re-speculate only when
			 * the last two packets agree on a new type that maps
			 * to a different next node.
			 */
			if ((last_type != l3) && (l2 == l3) &&
			    (next_index != p_nxt[l3])) {
				/* Put the stream accumulated for the
				 * old speculated ltype.
				 */
				rte_node_next_stream_put(graph, node,
							 next_index, held);

				held = 0;

				/* Get next stream for the new ltype */
				next_index = p_nxt[l3];
				last_type = l3;
				to_next = rte_node_next_stream_get(graph, node,
								   next_index,
								   nb_objs);
			} else if (next_index == p_nxt[l3]) {
				last_type = l3;
			}

			from += 4;
		} else {
			last_spec += 4;
		}
	}

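	/* Handle leftover packets one at a time */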
	while (n_left_from > 0) {
		mbuf0 = pkts[0];

		pkts += 1;
		n_left_from -= 1;

		l0 = mbuf0->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
		if (unlikely((l0 != last_type) &&
			     (p_nxt[l0] != next_index))) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from,
				   last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			rte_node_enqueue_x1(graph, node,
					    p_nxt[l0], from[0]);
			from += 1;
		} else {
			last_spec += 1;
		}
	}

	/* !!! Home run !!! Every packet matched the speculation:
	 * move the whole stream to the speculated next node in one shot.
	 */
	if (likely(last_spec == nb_objs)) {
		rte_node_next_stream_move(graph, node, next_index);
		return nb_objs;
	}

	held += last_spec;
	/* Copy things successfully speculated till now */
	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
	rte_node_next_stream_put(graph, node, next_index, held);

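	/* Save the last seen l2/l3 type to seed the next speculation */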
	ctx->l2l3_type = last_type;
	return nb_objs;
}

/* Packet Classification Node */
struct rte_node_register pkt_cls_node = {
	.process = pkt_cls_node_process,
	.name = "pkt_cls",

	.nb_edges = PKT_CLS_NEXT_MAX,
	.next_nodes = {
		/* Pkt drop node starts at '0' */
		[PKT_CLS_NEXT_PKT_DROP] = "pkt_drop",
		[PKT_CLS_NEXT_IP4_LOOKUP] = "ip4_lookup",
	},
};
RTE_NODE_REGISTER(pkt_cls_node);