/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Inspur Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#include "gro_udp4.h"

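/*
 * Allocate a reassembly table on the given NUMA socket. The item and
 * flow arrays are both sized to max_flow_num * max_item_per_flow,
 * capped at GRO_UDP4_TBL_MAX_ITEM_NUM. Returns NULL if the entry
 * count is zero or any allocation fails.
 */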
void *
gro_udp4_tbl_create(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow)
{
	struct gro_udp4_tbl *tbl;
	size_t size;
	uint32_t entries_num, i;

	entries_num = max_flow_num * max_item_per_flow;
	entries_num = RTE_MIN(entries_num, GRO_UDP4_TBL_MAX_ITEM_NUM);

	if (entries_num == 0)
		return NULL;

	tbl = rte_zmalloc_socket(__func__,
			sizeof(struct gro_udp4_tbl),
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl == NULL)
		return NULL;

	size = sizeof(struct gro_udp4_item) * entries_num;
	tbl->items = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->items == NULL) {
		rte_free(tbl);
		return NULL;
	}
	tbl->max_item_num = entries_num;

	size = sizeof(struct gro_udp4_flow) * entries_num;
	tbl->flows = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->flows == NULL) {
		rte_free(tbl->items);
		rte_free(tbl);
		return NULL;
	}
	/* INVALID_ARRAY_INDEX indicates an empty flow */
	for (i = 0; i < entries_num; i++)
		tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
	tbl->max_flow_num = entries_num;

	return tbl;
}

void
gro_udp4_tbl_destroy(void *tbl)
{
	struct gro_udp4_tbl *udp_tbl = tbl;

	if (udp_tbl) {
		rte_free(udp_tbl->items);
		rte_free(udp_tbl->flows);
	}
	rte_free(udp_tbl);
}

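/*
 * Both lookup helpers below are linear scans: an item slot is free
 * when its firstseg is NULL, a flow slot when its start_index is
 * INVALID_ARRAY_INDEX.
 */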
static inline uint32_t
find_an_empty_item(struct gro_udp4_tbl *tbl)
{
	uint32_t i;
	uint32_t max_item_num = tbl->max_item_num;

	for (i = 0; i < max_item_num; i++)
		if (tbl->items[i].firstseg == NULL)
			return i;
	return INVALID_ARRAY_INDEX;
}

static inline uint32_t
find_an_empty_flow(struct gro_udp4_tbl *tbl)
{
	uint32_t i;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++)
		if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
			return i;
	return INVALID_ARRAY_INDEX;
}

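/*
 * Store a packet as a new item. When prev_idx is valid, the new item
 * is linked into the flow's item list right after prev_idx, which is
 * how the list stays ordered by frag_offset. Returns the item index,
 * or INVALID_ARRAY_INDEX if the table is full.
 */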
static inline uint32_t
insert_new_item(struct gro_udp4_tbl *tbl,
		struct rte_mbuf *pkt,
		uint64_t start_time,
		uint32_t prev_idx,
		uint16_t frag_offset,
		uint8_t is_last_frag)
{
	uint32_t item_idx;

	item_idx = find_an_empty_item(tbl);
	if (unlikely(item_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	tbl->items[item_idx].firstseg = pkt;
	tbl->items[item_idx].lastseg = rte_pktmbuf_lastseg(pkt);
	tbl->items[item_idx].start_time = start_time;
	tbl->items[item_idx].next_pkt_idx = INVALID_ARRAY_INDEX;
	tbl->items[item_idx].frag_offset = frag_offset;
	tbl->items[item_idx].is_last_frag = is_last_frag;
	tbl->items[item_idx].nb_merged = 1;
	tbl->item_num++;

	/* If the previous packet exists, chain them together. */
	if (prev_idx != INVALID_ARRAY_INDEX) {
		tbl->items[item_idx].next_pkt_idx =
			tbl->items[prev_idx].next_pkt_idx;
		tbl->items[prev_idx].next_pkt_idx = item_idx;
	}

	return item_idx;
}

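/*
 * Mark an item slot empty and unlink it from its flow's item list.
 * Returns the index of the next item so callers can keep walking.
 */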
static inline uint32_t
delete_item(struct gro_udp4_tbl *tbl, uint32_t item_idx,
		uint32_t prev_item_idx)
{
	uint32_t next_idx = tbl->items[item_idx].next_pkt_idx;

	/* NULL indicates an empty item */
	tbl->items[item_idx].firstseg = NULL;
	tbl->item_num--;
	if (prev_item_idx != INVALID_ARRAY_INDEX)
		tbl->items[prev_item_idx].next_pkt_idx = next_idx;

	return next_idx;
}

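/*
 * Claim an empty flow slot, copy the lookup key into it and point its
 * start_index at the flow's first stored item. Returns
 * INVALID_ARRAY_INDEX when no slot is free.
 */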
static inline uint32_t
insert_new_flow(struct gro_udp4_tbl *tbl,
		struct udp4_flow_key *src,
		uint32_t item_idx)
{
	struct udp4_flow_key *dst;
	uint32_t flow_idx;

	flow_idx = find_an_empty_flow(tbl);
	if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	dst = &(tbl->flows[flow_idx].key);

	rte_ether_addr_copy(&(src->eth_saddr), &(dst->eth_saddr));
	rte_ether_addr_copy(&(src->eth_daddr), &(dst->eth_daddr));
	dst->ip_src_addr = src->ip_src_addr;
	dst->ip_dst_addr = src->ip_dst_addr;
	dst->ip_id = src->ip_id;

	tbl->flows[flow_idx].start_index = item_idx;
	tbl->flow_num++;

	return flow_idx;
}

/*
 * Update the IPv4 total length of the flushed packet and, if it ends
 * with the last fragment, clear the MF bit.
 */
static inline void
update_header(struct gro_udp4_item *item)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_mbuf *pkt = item->firstseg;
	uint16_t frag_offset;

	ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			pkt->l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -
			pkt->l2_len);

	/* Clear the MF bit if this is the last fragment */
	if (item->is_last_frag) {
		frag_offset = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
		ipv4_hdr->fragment_offset =
			rte_cpu_to_be_16(frag_offset & ~RTE_IPV4_HDR_MF_FLAG);
	}
}

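/*
 * Try to merge a fragment into the table. Returns 1 if the packet was
 * merged into an existing chain (the mbuf is consumed), 0 if it was
 * stored as a new item (the table takes ownership), and -1 if it
 * cannot be processed (not an IPv4 fragment, empty payload, or no
 * table space left), in which case the caller keeps the mbuf.
 */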
int32_t
gro_udp4_reassemble(struct rte_mbuf *pkt,
		struct gro_udp4_tbl *tbl,
		uint64_t start_time)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	uint16_t ip_dl;
	uint16_t ip_id, hdr_len;
	uint16_t frag_offset = 0;
	uint8_t is_last_frag;

	struct udp4_flow_key key;
	uint32_t cur_idx, prev_idx, item_idx;
	uint32_t i, max_flow_num, remaining_flow_num;
	int cmp;
	uint8_t find;

	eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
	hdr_len = pkt->l2_len + pkt->l3_len;

	/*
	 * Don't process non-fragmented packets.
	 */
	if (!is_ipv4_fragment(ipv4_hdr))
		return -1;

	/*
	 * Don't process packets that carry no payload beyond the
	 * headers.
	 */
	if (pkt->pkt_len <= hdr_len)
		return -1;

	ip_dl = rte_be_to_cpu_16(ipv4_hdr->total_length);
	if (ip_dl <= pkt->l3_len)
		return -1;

	ip_dl -= pkt->l3_len;
	ip_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
	frag_offset = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
	is_last_frag = ((frag_offset & RTE_IPV4_HDR_MF_FLAG) == 0) ? 1 : 0;
	frag_offset = (uint16_t)(frag_offset & RTE_IPV4_HDR_OFFSET_MASK) << 3;

	rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.eth_saddr));
	rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.eth_daddr));
	key.ip_src_addr = ipv4_hdr->src_addr;
	key.ip_dst_addr = ipv4_hdr->dst_addr;
	key.ip_id = ip_id;

	/* Search for a matching flow. */
	max_flow_num = tbl->max_flow_num;
	remaining_flow_num = tbl->flow_num;
	find = 0;
	for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
			if (is_same_udp4_flow(tbl->flows[i].key, key)) {
				find = 1;
				break;
			}
			remaining_flow_num--;
		}
	}

	/*
	 * No matching flow was found. Insert a new flow and store the
	 * packet in it.
	 */
	if (find == 0) {
		item_idx = insert_new_item(tbl, pkt, start_time,
				INVALID_ARRAY_INDEX, frag_offset,
				is_last_frag);
		if (unlikely(item_idx == INVALID_ARRAY_INDEX))
			return -1;
		if (insert_new_flow(tbl, &key, item_idx) ==
				INVALID_ARRAY_INDEX) {
			/*
			 * Inserting a new flow failed, so delete the
			 * stored packet.
			 */
			delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);
			return -1;
		}
		return 0;
	}

	/*
	 * Check all packets in the flow and try to find a neighbor for
	 * the input packet.
	 */
	cur_idx = tbl->flows[i].start_index;
	prev_idx = cur_idx;
	do {
		cmp = udp4_check_neighbor(&(tbl->items[cur_idx]),
				frag_offset, ip_dl, 0);
		if (cmp) {
			if (merge_two_udp4_packets(&(tbl->items[cur_idx]),
						pkt, cmp, frag_offset,
						is_last_frag, 0))
				return 1;
			/*
			 * Merging failed because the merged packet would
			 * exceed the maximum length, so store the packet
			 * into the flow.
			 */
			if (insert_new_item(tbl, pkt, start_time, prev_idx,
						frag_offset, is_last_frag) ==
					INVALID_ARRAY_INDEX)
				return -1;
			return 0;
		}

		/* Ensure inserted items are ordered by frag_offset */
		if (frag_offset < tbl->items[cur_idx].frag_offset)
			break;

		prev_idx = cur_idx;
		cur_idx = tbl->items[cur_idx].next_pkt_idx;
	} while (cur_idx != INVALID_ARRAY_INDEX);

	/* No neighbor was found, so store the packet into the flow. */
	if (cur_idx == tbl->flows[i].start_index) {
		/* Insert it before the first packet of the flow */
		item_idx = insert_new_item(tbl, pkt, start_time,
				INVALID_ARRAY_INDEX, frag_offset,
				is_last_frag);
		if (unlikely(item_idx == INVALID_ARRAY_INDEX))
			return -1;
		tbl->items[item_idx].next_pkt_idx = cur_idx;
		tbl->flows[i].start_index = item_idx;
	} else {
		if (insert_new_item(tbl, pkt, start_time, prev_idx,
				frag_offset, is_last_frag)
			== INVALID_ARRAY_INDEX)
			return -1;
	}

	return 0;
}

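/*
 * Walk the item list of a flow from start_idx and coalesce every
 * contiguous neighbor into the first packet. Stops at the first gap
 * in the fragment sequence, or when a merge would exceed the length
 * limit.
 */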
static int
gro_udp4_merge_items(struct gro_udp4_tbl *tbl,
			   uint32_t start_idx)
{
	uint16_t frag_offset;
	uint8_t is_last_frag;
	int16_t ip_dl;
	struct rte_mbuf *pkt;
	int cmp;
	uint32_t item_idx;
	uint16_t hdr_len;

	item_idx = tbl->items[start_idx].next_pkt_idx;
	while (item_idx != INVALID_ARRAY_INDEX) {
		pkt = tbl->items[item_idx].firstseg;
		hdr_len = pkt->l2_len + pkt->l3_len;
		ip_dl = pkt->pkt_len - hdr_len;
		frag_offset = tbl->items[item_idx].frag_offset;
		is_last_frag = tbl->items[item_idx].is_last_frag;
		cmp = udp4_check_neighbor(&(tbl->items[start_idx]),
					frag_offset, ip_dl, 0);
		if (cmp) {
			if (merge_two_udp4_packets(
					&(tbl->items[start_idx]),
					pkt, cmp, frag_offset,
					is_last_frag, 0)) {
				item_idx = delete_item(tbl, item_idx,
							INVALID_ARRAY_INDEX);
				tbl->items[start_idx].next_pkt_idx
					= item_idx;
			} else
				return 0;
		} else
			return 0;
	}

	return 0;
}

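/*
 * Flush every packet whose start_time is not after flush_timestamp
 * into out[]. Headers are fixed up via update_header() only when at
 * least two fragments were merged. Returns the number of packets
 * written, which is at most nb_out.
 */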
uint16_t
gro_udp4_tbl_timeout_flush(struct gro_udp4_tbl *tbl,
		uint64_t flush_timestamp,
		struct rte_mbuf **out,
		uint16_t nb_out)
{
	uint16_t k = 0;
	uint32_t i, j;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++) {
		if (unlikely(tbl->flow_num == 0))
			return k;

		j = tbl->flows[i].start_index;
		while (j != INVALID_ARRAY_INDEX) {
			if (tbl->items[j].start_time <= flush_timestamp) {
				gro_udp4_merge_items(tbl, j);
				out[k++] = tbl->items[j].firstseg;
				if (tbl->items[j].nb_merged > 1)
					update_header(&(tbl->items[j]));
				/*
				 * Delete the packet and get the next
				 * packet in the flow.
				 */
				j = delete_item(tbl, j, INVALID_ARRAY_INDEX);
				tbl->flows[i].start_index = j;
				if (j == INVALID_ARRAY_INDEX)
					tbl->flow_num--;

				if (unlikely(k == nb_out))
					return k;
			} else
				/*
				 * Flushing does not strictly follow the
				 * timestamps: once an item with start_time
				 * greater than flush_timestamp is found, the
				 * remaining packets of this flow are left
				 * for a later flush. Go on and check the
				 * other flows.
				 */
				break;
		}
	}
	return k;
}

uint32_t
gro_udp4_tbl_pkt_count(void *tbl)
{
	struct gro_udp4_tbl *gro_tbl = tbl;

	if (gro_tbl)
		return gro_tbl->item_num;

	return 0;
}
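
/*
 * A minimal usage sketch, not part of the library: it shows how the
 * table above might be driven directly, assuming the input mbufs are
 * IPv4 fragments with l2_len/l3_len already set and that out[] can
 * hold at least nb_pkts entries. The function name, the guard macro
 * and the table sizing below are illustrative only; applications
 * normally go through the rte_gro_* API instead.
 */
#ifdef GRO_UDP4_USAGE_SKETCH
static uint16_t
gro_udp4_usage_sketch(struct rte_mbuf **pkts, uint16_t nb_pkts,
		uint64_t now, struct rte_mbuf **out)
{
	struct gro_udp4_tbl *tbl;
	uint16_t i, nb_out = 0;

	/* One table on socket 0: up to 4 flows, 8 fragments per flow. */
	tbl = gro_udp4_tbl_create(0, 4, 8);
	if (tbl == NULL)
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		/* <0: unprocessable, pass through; 0: stored; 1: merged. */
		if (gro_udp4_reassemble(pkts[i], tbl, now) < 0)
			out[nb_out++] = pkts[i];
	}

	/* Flush everything stored so far (start_time <= now holds). */
	nb_out += gro_udp4_tbl_timeout_flush(tbl, now, &out[nb_out],
			nb_pkts - nb_out);
	gro_udp4_tbl_destroy(tbl);

	return nb_out;
}
#endif /* GRO_UDP4_USAGE_SKETCH */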
433