/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#include "gro_tcp4.h"
#include "gro_tcp_internal.h"

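/*
 * Create a TCP/IPv4 reassembly table on the given NUMA socket. The table
 * holds at most max_flow_num * max_item_per_flow packets, capped at
 * GRO_TCP4_TBL_MAX_ITEM_NUM, and returns NULL on failure. An illustrative
 * lifecycle (the sizes below are caller-chosen examples, not requirements):
 *
 *	void *tbl = gro_tcp4_tbl_create(socket_id, 64, 32);
 *	... call gro_tcp4_reassemble() per packet, and
 *	gro_tcp4_tbl_timeout_flush() periodically ...
 *	gro_tcp4_tbl_destroy(tbl);
 */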
void *
gro_tcp4_tbl_create(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow)
{
	struct gro_tcp4_tbl *tbl;
	size_t size;
	uint32_t entries_num, i;

	entries_num = max_flow_num * max_item_per_flow;
	entries_num = RTE_MIN(entries_num, GRO_TCP4_TBL_MAX_ITEM_NUM);

	if (entries_num == 0)
		return NULL;

	tbl = rte_zmalloc_socket(__func__,
			sizeof(struct gro_tcp4_tbl),
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl == NULL)
		return NULL;

	size = sizeof(struct gro_tcp_item) * entries_num;
	tbl->items = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->items == NULL) {
		rte_free(tbl);
		return NULL;
	}
	tbl->max_item_num = entries_num;

	size = sizeof(struct gro_tcp4_flow) * entries_num;
	tbl->flows = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->flows == NULL) {
		rte_free(tbl->items);
		rte_free(tbl);
		return NULL;
	}
	/* INVALID_ARRAY_INDEX indicates an empty flow */
	for (i = 0; i < entries_num; i++)
		tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
	tbl->max_flow_num = entries_num;

	return tbl;
}

void
gro_tcp4_tbl_destroy(void *tbl)
{
	struct gro_tcp4_tbl *tcp_tbl = tbl;

	if (tcp_tbl) {
		rte_free(tcp_tbl->items);
		rte_free(tcp_tbl->flows);
	}
	rte_free(tcp_tbl);
}

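/*
 * Linearly scan the flow array for an unused slot. Return the index of
 * the first empty flow, or INVALID_ARRAY_INDEX if the table is full.
 */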
static inline uint32_t
find_an_empty_flow(struct gro_tcp4_tbl *tbl)
{
	uint32_t i;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++)
		if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
			return i;
	return INVALID_ARRAY_INDEX;
}

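/*
 * Copy the flow key into an empty slot and point the flow at its first
 * packet item. Return the flow index, or INVALID_ARRAY_INDEX if no
 * empty flow is available.
 */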
static inline uint32_t
insert_new_flow(struct gro_tcp4_tbl *tbl,
		struct tcp4_flow_key *src,
		uint32_t item_idx)
{
	struct tcp4_flow_key *dst;
	uint32_t flow_idx;

	flow_idx = find_an_empty_flow(tbl);
	if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	dst = &(tbl->flows[flow_idx].key);

	ASSIGN_COMMON_TCP_KEY((&src->cmn_key), (&dst->cmn_key));

	dst->ip_src_addr = src->ip_src_addr;
	dst->ip_dst_addr = src->ip_dst_addr;

	tbl->flows[flow_idx].start_index = item_idx;
	tbl->flow_num++;

	return flow_idx;
}

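/*
 * Merge an input packet into the reassembly table. Return a non-negative
 * value if the packet is merged or stored in the table, and -1 if the
 * packet can't be handled by GRO, in which case the caller should forward
 * it unmodified.
 */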
int32_t
gro_tcp4_reassemble(struct rte_mbuf *pkt,
		struct gro_tcp4_tbl *tbl,
		uint64_t start_time)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	uint32_t sent_seq;
	int32_t tcp_dl;
	uint16_t ip_id, hdr_len, frag_off, ip_tlen;
	uint8_t is_atomic;

	struct tcp4_flow_key key;
	uint32_t item_idx;
	uint32_t i, max_flow_num, remaining_flow_num;
	uint8_t find;
	uint32_t item_start_idx;

	/*
	 * Don't process packets whose TCP header length is greater
	 * than 60 bytes or less than 20 bytes.
	 */
	if (unlikely(INVALID_TCP_HDRLEN(pkt->l4_len)))
		return -1;

	eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
	tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
	hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;

	/* Return early if the TCP flags are not handled in the GRO layer */
	if (tcp_hdr->tcp_flags & ~VALID_GRO_TCP_FLAGS)
		return -1;

	/* Trim the tail padding bytes */
	ip_tlen = rte_be_to_cpu_16(ipv4_hdr->total_length);
	if (pkt->pkt_len > (uint32_t)(ip_tlen + pkt->l2_len))
		rte_pktmbuf_trim(pkt, pkt->pkt_len - ip_tlen - pkt->l2_len);

	/* Don't process packets that carry no TCP payload. */
	tcp_dl = pkt->pkt_len - hdr_len;
	if (tcp_dl <= 0)
		return -1;

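	/* Build the lookup key from the Ethernet, IPv4 and TCP headers. */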
	rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.cmn_key.eth_saddr));
	rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.cmn_key.eth_daddr));
	key.ip_src_addr = ipv4_hdr->src_addr;
	key.ip_dst_addr = ipv4_hdr->dst_addr;
	key.cmn_key.src_port = tcp_hdr->src_port;
	key.cmn_key.dst_port = tcp_hdr->dst_port;
	key.cmn_key.recv_ack = tcp_hdr->recv_ack;

	/*
	 * Save the IPv4 ID for packets whose DF bit is 0. For packets
	 * whose DF bit is 1, the IPv4 ID is ignored.
	 */
	frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
	is_atomic = (frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;
	ip_id = is_atomic ? 0 : rte_be_to_cpu_16(ipv4_hdr->packet_id);

	/* Search for a matched flow. */
	max_flow_num = tbl->max_flow_num;
	remaining_flow_num = tbl->flow_num;
	find = 0;
	for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
			if (is_same_tcp4_flow(tbl->flows[i].key, key)) {
				find = 1;
				item_start_idx = tbl->flows[i].start_index;
				break;
			}
			remaining_flow_num--;
		}
	}

	if (find == 1) {
		/*
		 * Any packet with additional flags like PSH or FIN should
		 * be processed and flushed immediately, so mark its start
		 * time as 0 to have it flushed at once in timer mode.
		 */
		if (tcp_hdr->tcp_flags & (RTE_TCP_ACK_FLAG | RTE_TCP_PSH_FLAG | RTE_TCP_FIN_FLAG)) {
			if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
				tbl->items[item_start_idx].start_time = 0;
			return process_tcp_item(pkt, tcp_hdr, tcp_dl, tbl->items,
					tbl->flows[i].start_index, &tbl->item_num,
					tbl->max_item_num, ip_id, is_atomic, start_time);
		} else {
			return -1;
		}
	}
	/*
	 * Add a new flow to the table only if the packet carries data and
	 * its only TCP flag is ACK. Don't add packets with additional TCP
	 * flags to the GRO table.
	 */
	if (tcp_hdr->tcp_flags == RTE_TCP_ACK_FLAG) {
		sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
		item_idx = insert_new_tcp_item(pkt, tbl->items, &tbl->item_num,
				tbl->max_item_num, start_time,
				INVALID_ARRAY_INDEX, sent_seq, ip_id,
				is_atomic);
		if (item_idx == INVALID_ARRAY_INDEX)
			return -1;
		if (insert_new_flow(tbl, &key, item_idx) ==
				INVALID_ARRAY_INDEX) {
			/*
			 * Failed to insert a new flow, so delete the
			 * stored packet.
			 */
			delete_tcp_item(tbl->items, item_idx, &tbl->item_num, INVALID_ARRAY_INDEX);
			return -1;
		}
		return 0;
	}

	return -1;
}

/*
 * Update the IPv4 total length field of the merged packet to match its
 * new payload length before it is flushed.
 */
static inline void
update_header(struct gro_tcp_item *item)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_mbuf *pkt = item->firstseg;

	ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
			pkt->l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -
			pkt->l2_len);
}

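/*
 * Flush packets whose start time is not after flush_timestamp into the
 * out array, up to nb_out packets. Return the number of packets flushed.
 */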
uint16_t
gro_tcp4_tbl_timeout_flush(struct gro_tcp4_tbl *tbl,
		uint64_t flush_timestamp,
		struct rte_mbuf **out,
		uint16_t nb_out)
{
	uint16_t k = 0;
	uint32_t i, j;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++) {
		if (unlikely(tbl->flow_num == 0))
			return k;

		j = tbl->flows[i].start_index;
		while (j != INVALID_ARRAY_INDEX) {
			if (tbl->items[j].start_time <= flush_timestamp) {
				out[k++] = tbl->items[j].firstseg;
				if (tbl->items[j].nb_merged > 1)
					update_header(&(tbl->items[j]));
				/*
				 * Delete the packet and get the next
				 * packet in the flow.
				 */
				j = delete_tcp_item(tbl->items, j,
						&tbl->item_num, INVALID_ARRAY_INDEX);
				tbl->flows[i].start_index = j;
				if (j == INVALID_ARRAY_INDEX)
					tbl->flow_num--;

				if (unlikely(k == nb_out))
					return k;
			} else
				/*
				 * The remaining packets in this flow won't
				 * time out. Go to check other flows.
				 */
				break;
		}
	}
	return k;
}

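/* Return the number of packets currently held in the table. */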
uint32_t
gro_tcp4_tbl_pkt_count(void *tbl)
{
	struct gro_tcp4_tbl *gro_tbl = tbl;

	if (gro_tbl)
		return gro_tbl->item_num;

	return 0;
}