xref: /dpdk/lib/node/ip4_reassembly.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2023 Marvell.
 */

#include <arpa/inet.h>
#include <stdlib.h>
#include <sys/socket.h>

#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "rte_node_ip4_api.h"

#include "ip4_reassembly_priv.h"
#include "node_private.h"

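/* Per-node reassembly configuration, queued by rte_node_ip4_reassembly_configure()
 * and consumed by the node init callback; kept in a singly linked list keyed by node id.
 */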
struct ip4_reassembly_elem {
	struct ip4_reassembly_elem *next;
	struct ip4_reassembly_ctx ctx;
	rte_node_t node_id;
};

/* IP4 reassembly global data struct */
struct ip4_reassembly_node_main {
	struct ip4_reassembly_elem *head;
};

typedef struct ip4_reassembly_ctx ip4_reassembly_ctx_t;
typedef struct ip4_reassembly_elem ip4_reassembly_elem_t;

static struct ip4_reassembly_node_main ip4_reassembly_main;

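/*
 * Process callback: for every mbuf in the burst, reassemble IPv4 fragments
 * using the node's fragment table and death row. Completed and non-fragmented
 * packets are forwarded on next edge 1; mbufs placed on the death row are
 * sent to the "pkt_drop" next node.
 */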
static uint16_t
ip4_reassembly_node_process(struct rte_graph *graph, struct rte_node *node, void **objs,
			    uint16_t nb_objs)
{
#define PREFETCH_OFFSET 4
	struct rte_mbuf *mbuf, *mbuf_out;
	struct rte_ip_frag_death_row *dr;
	struct ip4_reassembly_ctx *ctx;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ip_frag_tbl *tbl;
	void **to_next, **to_free;
	uint16_t idx = 0;
	int i;

	ctx = (struct ip4_reassembly_ctx *)node->ctx;

	/* Get core specific reassembly tbl */
	tbl = ctx->tbl;
	dr = ctx->dr;

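	/* Warm up the cache: prefetch the headers of the first few packets */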
	for (i = 0; i < PREFETCH_OFFSET && i < nb_objs; i++) {
		rte_prefetch0(rte_pktmbuf_mtod_offset((struct rte_mbuf *)objs[i], void *,
						      sizeof(struct rte_ether_hdr)));
	}

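	/* Completed packets are compacted in place into node->objs and moved to
	 * next edge 1 at the end of the burst.
	 */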
	to_next = node->objs;
	for (i = 0; i < nb_objs - PREFETCH_OFFSET; i++) {
#if RTE_GRAPH_BURST_SIZE > 64
		/* Prefetch next-next mbufs */
		if (likely(i + 8 < nb_objs))
			rte_prefetch0(objs[i + 8]);
#endif
		rte_prefetch0(rte_pktmbuf_mtod_offset((struct rte_mbuf *)objs[i + PREFETCH_OFFSET],
						      void *, sizeof(struct rte_ether_hdr)));
		mbuf = (struct rte_mbuf *)objs[i];

		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
						   sizeof(struct rte_ether_hdr));
		if (rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)) {
			/* prepare mbuf: setup l2_len/l3_len. */
			mbuf->l2_len = sizeof(struct rte_ether_hdr);
			mbuf->l3_len = sizeof(struct rte_ipv4_hdr);

			mbuf_out = rte_ipv4_frag_reassemble_packet(tbl, dr, mbuf, rte_rdtsc(),
								   ipv4_hdr);
		} else {
			mbuf_out = mbuf;
		}

		if (mbuf_out)
			to_next[idx++] = (void *)mbuf_out;
	}

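	/* Handle the remaining packets without prefetching ahead */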
	for (; i < nb_objs; i++) {
		mbuf = (struct rte_mbuf *)objs[i];

		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
						   sizeof(struct rte_ether_hdr));
		if (rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)) {
			/* prepare mbuf: setup l2_len/l3_len. */
			mbuf->l2_len = sizeof(struct rte_ether_hdr);
			mbuf->l3_len = sizeof(struct rte_ipv4_hdr);

			mbuf_out = rte_ipv4_frag_reassemble_packet(tbl, dr, mbuf, rte_rdtsc(),
								   ipv4_hdr);
		} else {
			mbuf_out = mbuf;
		}

		if (mbuf_out)
			to_next[idx++] = (void *)mbuf_out;
	}
	node->idx = idx;
	rte_node_next_stream_move(graph, node, 1);
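	/* Flush mbufs accumulated on the death row (e.g. timed-out or invalid
	 * fragments) to the pkt_drop next node and count them as errors.
	 */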
	if (dr->cnt) {
		to_free = rte_node_next_stream_get(graph, node,
						   RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP, dr->cnt);
		rte_memcpy(to_free, dr->row, dr->cnt * sizeof(to_free[0]));
		rte_node_next_stream_put(graph, node, RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP,
					 dr->cnt);
		idx += dr->cnt;
		NODE_INCREMENT_XSTAT_ID(node, 0, dr->cnt, dr->cnt);
		dr->cnt = 0;
	}

	return idx;
}

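/*
 * Queue per-node reassembly resources (fragment table and death row) so that
 * each node instance can pick them up in its init callback, matched by node id.
 *
 * A rough usage sketch (illustrative only; table sizes, timeout and the death
 * row storage are the application's choice):
 *
 *	struct rte_node_ip4_reassembly_cfg cfg;
 *	static struct rte_ip_frag_death_row dr;
 *
 *	cfg.tbl = rte_ip_frag_table_create(1024, 16, 1024 * 16,
 *					   rte_get_tsc_hz(), SOCKET_ID_ANY);
 *	cfg.dr = &dr;
 *	cfg.node_id = rte_node_from_name("ip4_reassembly");
 *	rte_node_ip4_reassembly_configure(&cfg, 1);
 */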
int
rte_node_ip4_reassembly_configure(struct rte_node_ip4_reassembly_cfg *cfg, uint16_t cnt)
{
	ip4_reassembly_elem_t *elem;
	int i;

	for (i = 0; i < cnt; i++) {
		elem = malloc(sizeof(ip4_reassembly_elem_t));
		if (elem == NULL)
			return -ENOMEM;
		elem->ctx.dr = cfg[i].dr;
		elem->ctx.tbl = cfg[i].tbl;
		elem->node_id = cfg[i].node_id;
		elem->next = ip4_reassembly_main.head;
		ip4_reassembly_main.head = elem;
	}

	return 0;
}

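/*
 * Node init callback: look up the configuration queued for this node id and
 * copy it into the node's private context.
 */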
static int
ip4_reassembly_node_init(const struct rte_graph *graph, struct rte_node *node)
{
	ip4_reassembly_ctx_t *ctx = (ip4_reassembly_ctx_t *)node->ctx;
	ip4_reassembly_elem_t *elem = ip4_reassembly_main.head;

	RTE_SET_USED(graph);
	while (elem) {
		if (elem->node_id == node->id) {
			/* Update node specific context */
			memcpy(ctx, &elem->ctx, sizeof(ip4_reassembly_ctx_t));
			break;
		}
		elem = elem->next;
	}

	return 0;
}

static struct rte_node_xstats ip4_reassembly_xstats = {
	.nb_xstats = 1,
	.xstat_desc = {
		[0] = "ip4_reassembly_error",
	},
};

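/* Node registration: a single "ip4_reassembly_error" xstat and a "pkt_drop"
 * next node for packets discarded via the death row.
 */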
static struct rte_node_register ip4_reassembly_node = {
	.process = ip4_reassembly_node_process,
	.name = "ip4_reassembly",

	.init = ip4_reassembly_node_init,
	.xstats = &ip4_reassembly_xstats,

	.nb_edges = RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP + 1,
	.next_nodes = {
		[RTE_NODE_IP4_REASSEMBLY_NEXT_PKT_DROP] = "pkt_drop",
	},
};

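/* Internal accessor for the node registration, declared in ip4_reassembly_priv.h */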
struct rte_node_register *
ip4_reassembly_node_get(void)
{
	return &ip4_reassembly_node;
}

RTE_NODE_REGISTER(ip4_reassembly_node);