/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_udp.h>

#include "gro_vxlan_tcp4.h"

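/*
 * Create a VxLAN/TCP4 reassembly table on the given NUMA socket.
 * The table buffers at most max_flow_num * max_item_per_flow
 * packets, capped at GRO_VXLAN_TCP4_TBL_MAX_ITEM_NUM. Returns NULL
 * if the requested capacity is zero or if any allocation fails.
 */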
void *
gro_vxlan_tcp4_tbl_create(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow)
{
	struct gro_vxlan_tcp4_tbl *tbl;
	size_t size;
	uint32_t entries_num, i;

	entries_num = max_flow_num * max_item_per_flow;
	entries_num = RTE_MIN(entries_num, GRO_VXLAN_TCP4_TBL_MAX_ITEM_NUM);

	if (entries_num == 0)
		return NULL;

	tbl = rte_zmalloc_socket(__func__,
			sizeof(struct gro_vxlan_tcp4_tbl),
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl == NULL)
		return NULL;

	size = sizeof(struct gro_vxlan_tcp4_item) * entries_num;
	tbl->items = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->items == NULL) {
		rte_free(tbl);
		return NULL;
	}
	tbl->max_item_num = entries_num;

	size = sizeof(struct gro_vxlan_tcp4_flow) * entries_num;
	tbl->flows = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->flows == NULL) {
		rte_free(tbl->items);
		rte_free(tbl);
		return NULL;
	}

	for (i = 0; i < entries_num; i++)
		tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
	tbl->max_flow_num = entries_num;

	return tbl;
}

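/*
 * Free a reassembly table together with its item and flow arrays.
 * A NULL table is tolerated.
 *
 * A minimal lifecycle sketch (the flow and item counts below are
 * hypothetical; real values come from struct rte_gro_param):
 *
 *	void *tbl = gro_vxlan_tcp4_tbl_create(rte_socket_id(), 4, 8);
 *	if (tbl != NULL) {
 *		... feed packets to gro_vxlan_tcp4_reassemble() ...
 *		gro_vxlan_tcp4_tbl_destroy(tbl);
 *	}
 */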
void
gro_vxlan_tcp4_tbl_destroy(void *tbl)
{
	struct gro_vxlan_tcp4_tbl *vxlan_tbl = tbl;

	if (vxlan_tbl) {
		rte_free(vxlan_tbl->items);
		rte_free(vxlan_tbl->flows);
	}
	rte_free(vxlan_tbl);
}

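/*
 * Linearly scan the item array for an unused slot, identified by a
 * NULL firstseg. Returns its index, or INVALID_ARRAY_INDEX if every
 * slot is taken.
 */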
static inline uint32_t
find_an_empty_item(struct gro_vxlan_tcp4_tbl *tbl)
{
	uint32_t max_item_num = tbl->max_item_num, i;

	for (i = 0; i < max_item_num; i++)
		if (tbl->items[i].inner_item.firstseg == NULL)
			return i;
	return INVALID_ARRAY_INDEX;
}

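/*
 * Linearly scan the flow array for an unused slot, identified by an
 * invalid start_index. Returns its index, or INVALID_ARRAY_INDEX if
 * every slot is taken.
 */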
static inline uint32_t
find_an_empty_flow(struct gro_vxlan_tcp4_tbl *tbl)
{
	uint32_t max_flow_num = tbl->max_flow_num, i;

	for (i = 0; i < max_flow_num; i++)
		if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
			return i;
	return INVALID_ARRAY_INDEX;
}

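/*
 * Store a packet in an empty item slot, recording its TCP sequence
 * number and inner/outer IPv4 IDs. If prev_idx is valid, the new
 * item is linked into the flow's item list right after prev_idx.
 * Returns the new item index, or INVALID_ARRAY_INDEX if the table
 * is full.
 */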
static inline uint32_t
insert_new_item(struct gro_vxlan_tcp4_tbl *tbl,
		struct rte_mbuf *pkt,
		uint64_t start_time,
		uint32_t prev_idx,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id,
		uint8_t outer_is_atomic,
		uint8_t is_atomic)
{
	uint32_t item_idx;

	item_idx = find_an_empty_item(tbl);
	if (unlikely(item_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	tbl->items[item_idx].inner_item.firstseg = pkt;
	tbl->items[item_idx].inner_item.lastseg = rte_pktmbuf_lastseg(pkt);
	tbl->items[item_idx].inner_item.start_time = start_time;
	tbl->items[item_idx].inner_item.next_pkt_idx = INVALID_ARRAY_INDEX;
	tbl->items[item_idx].inner_item.sent_seq = sent_seq;
	tbl->items[item_idx].inner_item.l3.ip_id = ip_id;
	tbl->items[item_idx].inner_item.nb_merged = 1;
	tbl->items[item_idx].inner_item.is_atomic = is_atomic;
	tbl->items[item_idx].outer_ip_id = outer_ip_id;
	tbl->items[item_idx].outer_is_atomic = outer_is_atomic;
	tbl->item_num++;

	/* If the previous packet exists, chain the new one with it. */
	if (prev_idx != INVALID_ARRAY_INDEX) {
		tbl->items[item_idx].inner_item.next_pkt_idx =
			tbl->items[prev_idx].inner_item.next_pkt_idx;
		tbl->items[prev_idx].inner_item.next_pkt_idx = item_idx;
	}

	return item_idx;
}

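/*
 * Release an item slot and unlink it from its flow's item list.
 * Returns the index of the item that followed it, so that callers
 * can keep walking the list.
 */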
static inline uint32_t
delete_item(struct gro_vxlan_tcp4_tbl *tbl,
		uint32_t item_idx,
		uint32_t prev_item_idx)
{
	uint32_t next_idx = tbl->items[item_idx].inner_item.next_pkt_idx;

	/* NULL indicates an empty item. */
	tbl->items[item_idx].inner_item.firstseg = NULL;
	tbl->item_num--;
	if (prev_item_idx != INVALID_ARRAY_INDEX)
		tbl->items[prev_item_idx].inner_item.next_pkt_idx = next_idx;

	return next_idx;
}

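/*
 * Claim an empty flow slot, copy the inner and outer flow key into
 * it, and point it at the flow's first item. Returns the flow
 * index, or INVALID_ARRAY_INDEX if the table is full.
 */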
static inline uint32_t
insert_new_flow(struct gro_vxlan_tcp4_tbl *tbl,
		struct vxlan_tcp4_flow_key *src,
		uint32_t item_idx)
{
	struct vxlan_tcp4_flow_key *dst;
	uint32_t flow_idx;

	flow_idx = find_an_empty_flow(tbl);
	if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	dst = &(tbl->flows[flow_idx].key);

	ASSIGN_COMMON_TCP_KEY((&(src->inner_key.cmn_key)), (&(dst->inner_key.cmn_key)));
	dst->inner_key.ip_src_addr = src->inner_key.ip_src_addr;
	dst->inner_key.ip_dst_addr = src->inner_key.ip_dst_addr;

	dst->vxlan_hdr.vx_flags = src->vxlan_hdr.vx_flags;
	dst->vxlan_hdr.vx_vni = src->vxlan_hdr.vx_vni;
	rte_ether_addr_copy(&(src->outer_eth_saddr), &(dst->outer_eth_saddr));
	rte_ether_addr_copy(&(src->outer_eth_daddr), &(dst->outer_eth_daddr));
	dst->outer_ip_src_addr = src->outer_ip_src_addr;
	dst->outer_ip_dst_addr = src->outer_ip_dst_addr;
	dst->outer_src_port = src->outer_src_port;
	dst->outer_dst_port = src->outer_dst_port;

	tbl->flows[flow_idx].start_index = item_idx;
	tbl->flow_num++;

	return flow_idx;
}

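/*
 * Two flows match only if the outer Ethernet addresses, outer IPv4
 * addresses, outer UDP ports, VxLAN header and inner TCP/IPv4 key
 * are all equal.
 */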
static inline int
is_same_vxlan_tcp4_flow(struct vxlan_tcp4_flow_key k1,
		struct vxlan_tcp4_flow_key k2)
{
	return (rte_is_same_ether_addr(&k1.outer_eth_saddr,
					&k2.outer_eth_saddr) &&
			rte_is_same_ether_addr(&k1.outer_eth_daddr,
				&k2.outer_eth_daddr) &&
			(k1.outer_ip_src_addr == k2.outer_ip_src_addr) &&
			(k1.outer_ip_dst_addr == k2.outer_ip_dst_addr) &&
			(k1.outer_src_port == k2.outer_src_port) &&
			(k1.outer_dst_port == k2.outer_dst_port) &&
			(k1.vxlan_hdr.vx_flags == k2.vxlan_hdr.vx_flags) &&
			(k1.vxlan_hdr.vx_vni == k2.vxlan_hdr.vx_vni) &&
			is_same_tcp4_flow(k1.inner_key, k2.inner_key));
}

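/*
 * Check whether a packet is a neighbor of the given item. Returns 1
 * if it directly follows the item (append), -1 if it directly
 * precedes it (prepend), and 0 if the two can't be merged. Besides
 * the inner TCP sequence check, non-atomic outer IPv4 IDs must be
 * consecutive as well.
 */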
static inline int
check_vxlan_seq_option(struct gro_vxlan_tcp4_item *item,
		struct rte_tcp_hdr *tcp_hdr,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id,
		uint16_t tcp_hl,
		uint16_t tcp_dl,
		uint8_t outer_is_atomic,
		uint8_t is_atomic)
{
	struct rte_mbuf *pkt = item->inner_item.firstseg;
	int cmp;
	uint16_t l2_offset;

	/* Don't merge packets whose outer DF bits are different. */
	if (unlikely(item->outer_is_atomic ^ outer_is_atomic))
		return 0;

	l2_offset = pkt->outer_l2_len + pkt->outer_l3_len;
	cmp = check_seq_option(&item->inner_item, tcp_hdr, sent_seq, ip_id,
			tcp_hl, tcp_dl, l2_offset, is_atomic);
	if ((cmp > 0) && (outer_is_atomic ||
				(outer_ip_id == item->outer_ip_id + 1)))
		/* Append the new packet. */
		return 1;
	else if ((cmp < 0) && (outer_is_atomic ||
				(outer_ip_id + item->inner_item.nb_merged ==
				 item->outer_ip_id)))
		/* Prepend the new packet. */
		return -1;

	return 0;
}

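/*
 * Try to merge a packet with an existing item. On success, the
 * inner TCP payload is chained onto the item's mbuf chain and the
 * stored outer IPv4 ID is refreshed; the headers themselves are
 * only rewritten at flush time by update_vxlan_header(). Returns 1
 * on success and 0 if the merged packet would be oversized.
 */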
static inline int
merge_two_vxlan_tcp4_packets(struct gro_vxlan_tcp4_item *item,
		struct rte_mbuf *pkt,
		int cmp,
		uint32_t sent_seq,
		uint8_t tcp_flags,
		uint16_t outer_ip_id,
		uint16_t ip_id)
{
	if (merge_two_tcp_packets(&item->inner_item, pkt, cmp, sent_seq, tcp_flags,
				ip_id, pkt->outer_l2_len +
				pkt->outer_l3_len)) {
		/* Update the outer IPv4 ID to the larger value. */
		item->outer_ip_id = cmp > 0 ? outer_ip_id : item->outer_ip_id;
		return 1;
	}

	return 0;
}

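/*
 * Rewrite the length fields of the outer IPv4, outer UDP and inner
 * IPv4 headers of a merged packet so that they cover the whole
 * reassembled payload.
 */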
static inline void
update_vxlan_header(struct gro_vxlan_tcp4_item *item)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_udp_hdr *udp_hdr;
	struct rte_mbuf *pkt = item->inner_item.firstseg;
	uint16_t len;

	/* Update the outer IPv4 header. */
	len = pkt->pkt_len - pkt->outer_l2_len;
	ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
					   pkt->outer_l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(len);

	/* Update the outer UDP header. */
	len -= pkt->outer_l3_len;
	udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + pkt->outer_l3_len);
	udp_hdr->dgram_len = rte_cpu_to_be_16(len);

	/* Update the inner IPv4 header. */
	len -= pkt->l2_len;
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(len);
}

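/*
 * Merge a VxLAN/TCP4 packet into the reassembly table. Returns 1 if
 * the packet is merged into an existing item, 0 if it is buffered
 * as a new item, and -1 if it doesn't qualify for GRO (unexpected
 * TCP flags, empty payload, invalid TCP header length) or the table
 * is full, in which case the caller should pass it through
 * unmodified.
 */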
int32_t
gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,
		struct gro_vxlan_tcp4_tbl *tbl,
		uint64_t start_time)
{
	struct rte_ether_hdr *outer_eth_hdr, *eth_hdr;
	struct rte_ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	struct rte_udp_hdr *udp_hdr;
	struct rte_vxlan_hdr *vxlan_hdr;
	uint32_t sent_seq;
	int32_t tcp_dl;
	uint16_t frag_off, outer_ip_id, ip_id;
	uint8_t outer_is_atomic, is_atomic;

	struct vxlan_tcp4_flow_key key;
	uint32_t cur_idx, prev_idx, item_idx;
	uint32_t i, max_flow_num, remaining_flow_num;
	int cmp;
	uint16_t hdr_len;
	uint8_t find;

	/*
	 * Don't process the packet whose TCP header length is greater
	 * than 60 bytes or less than 20 bytes.
	 */
	if (unlikely(INVALID_TCP_HDRLEN(pkt->l4_len)))
		return -1;

	/*
	 * Locate the outer and inner headers. Note that l2_len covers
	 * the outer UDP header, the VxLAN header and the inner
	 * Ethernet header.
	 */
	outer_eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	outer_ipv4_hdr = (struct rte_ipv4_hdr *)((char *)outer_eth_hdr +
			pkt->outer_l2_len);
	udp_hdr = (struct rte_udp_hdr *)((char *)outer_ipv4_hdr +
			pkt->outer_l3_len);
	vxlan_hdr = (struct rte_vxlan_hdr *)((char *)udp_hdr +
			sizeof(struct rte_udp_hdr));
	eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_hdr +
			sizeof(struct rte_vxlan_hdr));
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
	tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);

	/*
	 * Don't process the packet which has FIN, SYN, RST, PSH, URG,
	 * ECE or CWR set.
	 */
	if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
		return -1;

	hdr_len = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len +
		pkt->l3_len + pkt->l4_len;
	/*
	 * Don't process the packet whose payload length is less than or
	 * equal to 0.
	 */
	tcp_dl = pkt->pkt_len - hdr_len;
	if (tcp_dl <= 0)
		return -1;

	/*
	 * Save the IPv4 ID of a packet whose DF bit is 0. For a packet
	 * whose DF bit is 1, the IPv4 ID is ignored.
	 */
	frag_off = rte_be_to_cpu_16(outer_ipv4_hdr->fragment_offset);
	outer_is_atomic =
		(frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;
	outer_ip_id = outer_is_atomic ? 0 :
		rte_be_to_cpu_16(outer_ipv4_hdr->packet_id);
	frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
	is_atomic = (frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;
	ip_id = is_atomic ? 0 : rte_be_to_cpu_16(ipv4_hdr->packet_id);

	sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);

	rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.inner_key.cmn_key.eth_saddr));
	rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.inner_key.cmn_key.eth_daddr));
	key.inner_key.ip_src_addr = ipv4_hdr->src_addr;
	key.inner_key.ip_dst_addr = ipv4_hdr->dst_addr;
	key.inner_key.cmn_key.recv_ack = tcp_hdr->recv_ack;
	key.inner_key.cmn_key.src_port = tcp_hdr->src_port;
	key.inner_key.cmn_key.dst_port = tcp_hdr->dst_port;

	key.vxlan_hdr.vx_flags = vxlan_hdr->vx_flags;
	key.vxlan_hdr.vx_vni = vxlan_hdr->vx_vni;
	rte_ether_addr_copy(&(outer_eth_hdr->src_addr), &(key.outer_eth_saddr));
	rte_ether_addr_copy(&(outer_eth_hdr->dst_addr), &(key.outer_eth_daddr));
	key.outer_ip_src_addr = outer_ipv4_hdr->src_addr;
	key.outer_ip_dst_addr = outer_ipv4_hdr->dst_addr;
	key.outer_src_port = udp_hdr->src_port;
	key.outer_dst_port = udp_hdr->dst_port;

	/* Search for a matched flow. */
	max_flow_num = tbl->max_flow_num;
	remaining_flow_num = tbl->flow_num;
	find = 0;
	for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
			if (is_same_vxlan_tcp4_flow(tbl->flows[i].key, key)) {
				find = 1;
				break;
			}
			remaining_flow_num--;
		}
	}

	/*
	 * Can't find a matched flow. Insert a new flow and store the
	 * packet in it.
	 */
	if (find == 0) {
		item_idx = insert_new_item(tbl, pkt, start_time,
				INVALID_ARRAY_INDEX, sent_seq, outer_ip_id,
				ip_id, outer_is_atomic, is_atomic);
		if (item_idx == INVALID_ARRAY_INDEX)
			return -1;
		if (insert_new_flow(tbl, &key, item_idx) ==
				INVALID_ARRAY_INDEX) {
			/*
			 * Failed to insert a new flow, so
			 * delete the inserted packet.
			 */
			delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);
			return -1;
		}
		return 0;
	}

	/* Check all packets in the flow and try to find a neighbor. */
	cur_idx = tbl->flows[i].start_index;
	prev_idx = cur_idx;
	do {
		cmp = check_vxlan_seq_option(&(tbl->items[cur_idx]), tcp_hdr,
				sent_seq, outer_ip_id, ip_id, pkt->l4_len,
				tcp_dl, outer_is_atomic, is_atomic);
		if (cmp) {
			if (merge_two_vxlan_tcp4_packets(&(tbl->items[cur_idx]),
						pkt, cmp, sent_seq, tcp_hdr->tcp_flags,
						outer_ip_id, ip_id))
				return 1;
			/*
			 * The two packets can't be merged, as the merged
			 * packet length would exceed the maximum.
			 * Insert the packet into the flow.
			 */
			if (insert_new_item(tbl, pkt, start_time, prev_idx,
						sent_seq, outer_ip_id,
						ip_id, outer_is_atomic,
						is_atomic) ==
					INVALID_ARRAY_INDEX)
				return -1;
			return 0;
		}
		prev_idx = cur_idx;
		cur_idx = tbl->items[cur_idx].inner_item.next_pkt_idx;
	} while (cur_idx != INVALID_ARRAY_INDEX);

	/* Can't find a neighbor. Insert the packet into the flow. */
	if (insert_new_item(tbl, pkt, start_time, prev_idx, sent_seq,
				outer_ip_id, ip_id, outer_is_atomic,
				is_atomic) == INVALID_ARRAY_INDEX)
		return -1;

	return 0;
}

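/*
 * Flush every packet whose start_time is not after flush_timestamp,
 * writing at most nb_out mbufs to out[] and returning the number
 * flushed. Merged packets get their headers fixed up before being
 * handed out.
 *
 * A minimal polling sketch (the burst size and timeout_cycles value
 * are hypothetical):
 *
 *	struct rte_mbuf *out[32];
 *	uint16_t n = gro_vxlan_tcp4_tbl_timeout_flush(tbl,
 *			rte_rdtsc() - timeout_cycles, out, 32);
 */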
uint16_t
gro_vxlan_tcp4_tbl_timeout_flush(struct gro_vxlan_tcp4_tbl *tbl,
		uint64_t flush_timestamp,
		struct rte_mbuf **out,
		uint16_t nb_out)
{
	uint16_t k = 0;
	uint32_t i, j;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++) {
		if (unlikely(tbl->flow_num == 0))
			return k;

		j = tbl->flows[i].start_index;
		while (j != INVALID_ARRAY_INDEX) {
			if (tbl->items[j].inner_item.start_time <=
					flush_timestamp) {
				out[k++] = tbl->items[j].inner_item.firstseg;
				if (tbl->items[j].inner_item.nb_merged > 1)
					update_vxlan_header(&(tbl->items[j]));
				/*
				 * Delete the item and get the next packet
				 * index.
				 */
				j = delete_item(tbl, j, INVALID_ARRAY_INDEX);
				tbl->flows[i].start_index = j;
				if (j == INVALID_ARRAY_INDEX)
					tbl->flow_num--;

				if (unlikely(k == nb_out))
					return k;
			} else
				/*
				 * The remaining packets in this flow won't
				 * time out. Go on to check other flows.
				 */
				break;
		}
	}
	return k;
}

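/*
 * Return the number of packets currently buffered in the table, or
 * 0 for a NULL table.
 */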
uint32_t
gro_vxlan_tcp4_tbl_pkt_count(void *tbl)
{
	struct gro_vxlan_tcp4_tbl *gro_tbl = tbl;

	if (gro_tbl)
		return gro_tbl->item_num;

	return 0;
}