1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
3 */
4
5 #include <rte_malloc.h>
6 #include <rte_cycles.h>
7 #include <rte_ethdev.h>
8
9 #include "rte_gro.h"
10 #include "gro_tcp4.h"
11 #include "gro_tcp6.h"
12 #include "gro_udp4.h"
13 #include "gro_vxlan_tcp4.h"
14 #include "gro_vxlan_udp4.h"
15
/* Constructor of a per-type reassembly table; returns NULL on failure. */
typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow);
/* Destructor of a per-type reassembly table. */
typedef void (*gro_tbl_destroy_fn)(void *tbl);
/* Reports how many packets a reassembly table currently holds. */
typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);

/*
 * Per-GRO-type dispatch tables, indexed by GRO type index.
 * NOTE(review): entry order (TCP/IPv4, VXLAN-TCP/IPv4, UDP/IPv4,
 * VXLAN-UDP/IPv4, TCP/IPv6) must match the RTE_GRO_*_INDEX values
 * declared in rte_gro.h -- confirm when adding a new GRO type.
 */
static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_create, gro_vxlan_tcp4_tbl_create,
		gro_udp4_tbl_create, gro_vxlan_udp4_tbl_create, gro_tcp6_tbl_create, NULL};
static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_destroy, gro_vxlan_tcp4_tbl_destroy,
		gro_udp4_tbl_destroy, gro_vxlan_udp4_tbl_destroy,
		gro_tcp6_tbl_destroy,
		NULL};
static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_pkt_count, gro_vxlan_tcp4_tbl_pkt_count,
		gro_udp4_tbl_pkt_count, gro_vxlan_udp4_tbl_pkt_count,
		gro_tcp6_tbl_pkt_count,
		NULL};
35
/* Plain (non-tunnelled), non-fragmented TCP over IPv4. */
#define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP) && \
		((ptype & RTE_PTYPE_L4_FRAG) != RTE_PTYPE_L4_FRAG) && \
		(RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))

/* Plain, non-fragmented TCP over IPv6.
 * GRO with extension headers is not supported.
 */
#define IS_IPV6_TCP_PKT(ptype) (RTE_ETH_IS_IPV6_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP) && \
		((ptype & RTE_PTYPE_L4_FRAG) != RTE_PTYPE_L4_FRAG) && \
		(RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))

/* Plain UDP over IPv4. Fragments are not excluded here, unlike the
 * TCP checks above (UDP GRO merges IP fragments).
 */
#define IS_IPV4_UDP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		(RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))

/* Outer IPv4/UDP carrying a VXLAN tunnel whose inner packet is
 * TCP over (possibly extended) IPv4; outer must not be fragmented.
 */
#define IS_IPV4_VXLAN_TCP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		((ptype & RTE_PTYPE_L4_FRAG) != RTE_PTYPE_L4_FRAG) && \
		((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
		 RTE_PTYPE_TUNNEL_VXLAN) && \
		((ptype & RTE_PTYPE_INNER_L4_TCP) == \
		 RTE_PTYPE_INNER_L4_TCP) && \
		(((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4) || \
		 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4_EXT) || \
		 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)))

/* Outer IPv4/UDP carrying a VXLAN tunnel whose inner packet is
 * UDP over (possibly extended) IPv4.
 */
#define IS_IPV4_VXLAN_UDP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
		 RTE_PTYPE_TUNNEL_VXLAN) && \
		((ptype & RTE_PTYPE_INNER_L4_UDP) == \
		 RTE_PTYPE_INNER_L4_UDP) && \
		(((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4) || \
		 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4_EXT) || \
		 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)))
77
/*
 * GRO context structure. It keeps the table structures, which are
 * used to merge packets, for different GRO types. Before using
 * rte_gro_reassemble(), applications need to create the GRO context
 * first.
 */
struct gro_ctx {
	/* Bitmask of GRO types this context performs (RTE_GRO_* flags). */
	uint64_t gro_types;
	/* Reassembly tables, one slot per GRO type index. Slots for
	 * disabled or unsupported types stay NULL (zmalloc'd).
	 */
	void *tbls[RTE_GRO_TYPE_MAX_NUM];
};
90
91 void *
rte_gro_ctx_create(const struct rte_gro_param * param)92 rte_gro_ctx_create(const struct rte_gro_param *param)
93 {
94 struct gro_ctx *gro_ctx;
95 gro_tbl_create_fn create_tbl_fn;
96 uint64_t gro_type_flag = 0;
97 uint64_t gro_types = 0;
98 uint8_t i;
99
100 gro_ctx = rte_zmalloc_socket(__func__,
101 sizeof(struct gro_ctx),
102 RTE_CACHE_LINE_SIZE,
103 param->socket_id);
104 if (gro_ctx == NULL)
105 return NULL;
106
107 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
108 gro_type_flag = 1ULL << i;
109 if ((param->gro_types & gro_type_flag) == 0)
110 continue;
111
112 create_tbl_fn = tbl_create_fn[i];
113 if (create_tbl_fn == NULL)
114 continue;
115
116 gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
117 param->max_flow_num,
118 param->max_item_per_flow);
119 if (gro_ctx->tbls[i] == NULL) {
120 /* destroy all created tables */
121 gro_ctx->gro_types = gro_types;
122 rte_gro_ctx_destroy(gro_ctx);
123 return NULL;
124 }
125 gro_types |= gro_type_flag;
126 }
127 gro_ctx->gro_types = param->gro_types;
128
129 return gro_ctx;
130 }
131
132 void
rte_gro_ctx_destroy(void * ctx)133 rte_gro_ctx_destroy(void *ctx)
134 {
135 gro_tbl_destroy_fn destroy_tbl_fn;
136 struct gro_ctx *gro_ctx = ctx;
137 uint64_t gro_type_flag;
138 uint8_t i;
139
140 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
141 gro_type_flag = 1ULL << i;
142 if ((gro_ctx->gro_types & gro_type_flag) == 0)
143 continue;
144 destroy_tbl_fn = tbl_destroy_fn[i];
145 if (destroy_tbl_fn)
146 destroy_tbl_fn(gro_ctx->tbls[i]);
147 }
148 rte_free(gro_ctx);
149 }
150
151 uint16_t
rte_gro_reassemble_burst(struct rte_mbuf ** pkts,uint16_t nb_pkts,const struct rte_gro_param * param)152 rte_gro_reassemble_burst(struct rte_mbuf **pkts,
153 uint16_t nb_pkts,
154 const struct rte_gro_param *param)
155 {
156 /* allocate a reassembly table for TCP/IPv4 GRO */
157 struct gro_tcp4_tbl tcp_tbl;
158 struct gro_tcp4_flow tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
159 struct gro_tcp_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };
160
161 struct gro_tcp6_tbl tcp6_tbl;
162 struct gro_tcp6_flow tcp6_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
163 struct gro_tcp_item tcp6_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };
164
165 /* allocate a reassembly table for UDP/IPv4 GRO */
166 struct gro_udp4_tbl udp_tbl;
167 struct gro_udp4_flow udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
168 struct gro_udp4_item udp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };
169
170 /* Allocate a reassembly table for VXLAN TCP GRO */
171 struct gro_vxlan_tcp4_tbl vxlan_tcp_tbl;
172 struct gro_vxlan_tcp4_flow vxlan_tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
173 struct gro_vxlan_tcp4_item vxlan_tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM]
174 = {{{0}, 0, 0} };
175
176 /* Allocate a reassembly table for VXLAN UDP GRO */
177 struct gro_vxlan_udp4_tbl vxlan_udp_tbl;
178 struct gro_vxlan_udp4_flow vxlan_udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
179 struct gro_vxlan_udp4_item vxlan_udp_items[RTE_GRO_MAX_BURST_ITEM_NUM]
180 = {{{0}} };
181
182 struct rte_mbuf *unprocess_pkts[nb_pkts];
183 uint32_t item_num;
184 int32_t ret;
185 uint16_t i, unprocess_num = 0, nb_after_gro = nb_pkts;
186 uint8_t do_tcp4_gro = 0, do_vxlan_tcp_gro = 0, do_udp4_gro = 0,
187 do_vxlan_udp_gro = 0, do_tcp6_gro = 0;
188
189 if (unlikely((param->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
190 RTE_GRO_TCP_IPV4 | RTE_GRO_TCP_IPV6 |
191 RTE_GRO_IPV4_VXLAN_UDP_IPV4 |
192 RTE_GRO_UDP_IPV4)) == 0))
193 return nb_pkts;
194
195 /* Get the maximum number of packets */
196 item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
197 param->max_item_per_flow));
198 item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);
199
200 if (param->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
201 for (i = 0; i < item_num; i++)
202 vxlan_tcp_flows[i].start_index = INVALID_ARRAY_INDEX;
203
204 vxlan_tcp_tbl.flows = vxlan_tcp_flows;
205 vxlan_tcp_tbl.items = vxlan_tcp_items;
206 vxlan_tcp_tbl.flow_num = 0;
207 vxlan_tcp_tbl.item_num = 0;
208 vxlan_tcp_tbl.max_flow_num = item_num;
209 vxlan_tcp_tbl.max_item_num = item_num;
210 do_vxlan_tcp_gro = 1;
211 }
212
213 if (param->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) {
214 for (i = 0; i < item_num; i++)
215 vxlan_udp_flows[i].start_index = INVALID_ARRAY_INDEX;
216
217 vxlan_udp_tbl.flows = vxlan_udp_flows;
218 vxlan_udp_tbl.items = vxlan_udp_items;
219 vxlan_udp_tbl.flow_num = 0;
220 vxlan_udp_tbl.item_num = 0;
221 vxlan_udp_tbl.max_flow_num = item_num;
222 vxlan_udp_tbl.max_item_num = item_num;
223 do_vxlan_udp_gro = 1;
224 }
225
226 if (param->gro_types & RTE_GRO_TCP_IPV4) {
227 for (i = 0; i < item_num; i++)
228 tcp_flows[i].start_index = INVALID_ARRAY_INDEX;
229
230 tcp_tbl.flows = tcp_flows;
231 tcp_tbl.items = tcp_items;
232 tcp_tbl.flow_num = 0;
233 tcp_tbl.item_num = 0;
234 tcp_tbl.max_flow_num = item_num;
235 tcp_tbl.max_item_num = item_num;
236 do_tcp4_gro = 1;
237 }
238
239 if (param->gro_types & RTE_GRO_UDP_IPV4) {
240 for (i = 0; i < item_num; i++)
241 udp_flows[i].start_index = INVALID_ARRAY_INDEX;
242
243 udp_tbl.flows = udp_flows;
244 udp_tbl.items = udp_items;
245 udp_tbl.flow_num = 0;
246 udp_tbl.item_num = 0;
247 udp_tbl.max_flow_num = item_num;
248 udp_tbl.max_item_num = item_num;
249 do_udp4_gro = 1;
250 }
251
252 if (param->gro_types & RTE_GRO_TCP_IPV6) {
253 for (i = 0; i < item_num; i++)
254 tcp6_flows[i].start_index = INVALID_ARRAY_INDEX;
255
256 tcp6_tbl.flows = tcp6_flows;
257 tcp6_tbl.items = tcp6_items;
258 tcp6_tbl.flow_num = 0;
259 tcp6_tbl.item_num = 0;
260 tcp6_tbl.max_flow_num = item_num;
261 tcp6_tbl.max_item_num = item_num;
262 do_tcp6_gro = 1;
263 }
264
265 for (i = 0; i < nb_pkts; i++) {
266 /*
267 * The timestamp is ignored, since all packets
268 * will be flushed from the tables.
269 */
270 if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
271 do_vxlan_tcp_gro) {
272 ret = gro_vxlan_tcp4_reassemble(pkts[i],
273 &vxlan_tcp_tbl, 0);
274 if (ret > 0)
275 /* Merge successfully */
276 nb_after_gro--;
277 else if (ret < 0)
278 unprocess_pkts[unprocess_num++] = pkts[i];
279 } else if (IS_IPV4_VXLAN_UDP4_PKT(pkts[i]->packet_type) &&
280 do_vxlan_udp_gro) {
281 ret = gro_vxlan_udp4_reassemble(pkts[i],
282 &vxlan_udp_tbl, 0);
283 if (ret > 0)
284 /* Merge successfully */
285 nb_after_gro--;
286 else if (ret < 0)
287 unprocess_pkts[unprocess_num++] = pkts[i];
288 } else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
289 do_tcp4_gro) {
290 ret = gro_tcp4_reassemble(pkts[i], &tcp_tbl, 0);
291 if (ret > 0)
292 /* merge successfully */
293 nb_after_gro--;
294 else if (ret < 0)
295 unprocess_pkts[unprocess_num++] = pkts[i];
296 } else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
297 do_udp4_gro) {
298 ret = gro_udp4_reassemble(pkts[i], &udp_tbl, 0);
299 if (ret > 0)
300 /* merge successfully */
301 nb_after_gro--;
302 else if (ret < 0)
303 unprocess_pkts[unprocess_num++] = pkts[i];
304 } else if (IS_IPV6_TCP_PKT(pkts[i]->packet_type) &&
305 do_tcp6_gro) {
306 ret = gro_tcp6_reassemble(pkts[i], &tcp6_tbl, 0);
307 if (ret > 0)
308 /* merge successfully */
309 nb_after_gro--;
310 else if (ret < 0)
311 unprocess_pkts[unprocess_num++] = pkts[i];
312 } else
313 unprocess_pkts[unprocess_num++] = pkts[i];
314 }
315
316 if ((nb_after_gro < nb_pkts)
317 || (unprocess_num < nb_pkts)) {
318 i = 0;
319 /* Copy unprocessed packets */
320 if (unprocess_num > 0) {
321 memcpy(&pkts[i], unprocess_pkts,
322 sizeof(struct rte_mbuf *) *
323 unprocess_num);
324 i = unprocess_num;
325 }
326
327 /* Flush all packets from the tables */
328 if (do_vxlan_tcp_gro) {
329 i += gro_vxlan_tcp4_tbl_timeout_flush(&vxlan_tcp_tbl,
330 0, pkts, nb_pkts);
331 }
332
333 if (do_vxlan_udp_gro) {
334 i += gro_vxlan_udp4_tbl_timeout_flush(&vxlan_udp_tbl,
335 0, &pkts[i], nb_pkts - i);
336
337 }
338
339 if (do_tcp4_gro) {
340 i += gro_tcp4_tbl_timeout_flush(&tcp_tbl, 0,
341 &pkts[i], nb_pkts - i);
342 }
343
344 if (do_udp4_gro) {
345 i += gro_udp4_tbl_timeout_flush(&udp_tbl, 0,
346 &pkts[i], nb_pkts - i);
347 }
348
349 if (do_tcp6_gro) {
350 i += gro_tcp6_tbl_timeout_flush(&tcp6_tbl, 0,
351 &pkts[i], nb_pkts - i);
352 }
353 }
354
355 return nb_after_gro;
356 }
357
358 uint16_t
rte_gro_reassemble(struct rte_mbuf ** pkts,uint16_t nb_pkts,void * ctx)359 rte_gro_reassemble(struct rte_mbuf **pkts,
360 uint16_t nb_pkts,
361 void *ctx)
362 {
363 struct rte_mbuf *unprocess_pkts[nb_pkts];
364 struct gro_ctx *gro_ctx = ctx;
365 void *tcp_tbl, *udp_tbl, *vxlan_tcp_tbl, *vxlan_udp_tbl, *tcp6_tbl;
366 uint64_t current_time;
367 uint16_t i, unprocess_num = 0;
368 uint8_t do_tcp4_gro, do_vxlan_tcp_gro, do_udp4_gro, do_vxlan_udp_gro, do_tcp6_gro;
369
370 if (unlikely((gro_ctx->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
371 RTE_GRO_TCP_IPV4 | RTE_GRO_TCP_IPV6 |
372 RTE_GRO_IPV4_VXLAN_UDP_IPV4 |
373 RTE_GRO_UDP_IPV4)) == 0))
374 return nb_pkts;
375
376 tcp_tbl = gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX];
377 vxlan_tcp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX];
378 udp_tbl = gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX];
379 vxlan_udp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX];
380 tcp6_tbl = gro_ctx->tbls[RTE_GRO_TCP_IPV6_INDEX];
381
382 do_tcp4_gro = (gro_ctx->gro_types & RTE_GRO_TCP_IPV4) ==
383 RTE_GRO_TCP_IPV4;
384 do_vxlan_tcp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) ==
385 RTE_GRO_IPV4_VXLAN_TCP_IPV4;
386 do_udp4_gro = (gro_ctx->gro_types & RTE_GRO_UDP_IPV4) ==
387 RTE_GRO_UDP_IPV4;
388 do_vxlan_udp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) ==
389 RTE_GRO_IPV4_VXLAN_UDP_IPV4;
390 do_tcp6_gro = (gro_ctx->gro_types & RTE_GRO_TCP_IPV6) == RTE_GRO_TCP_IPV6;
391
392 current_time = rte_rdtsc();
393
394 for (i = 0; i < nb_pkts; i++) {
395 if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
396 do_vxlan_tcp_gro) {
397 if (gro_vxlan_tcp4_reassemble(pkts[i], vxlan_tcp_tbl,
398 current_time) < 0)
399 unprocess_pkts[unprocess_num++] = pkts[i];
400 } else if (IS_IPV4_VXLAN_UDP4_PKT(pkts[i]->packet_type) &&
401 do_vxlan_udp_gro) {
402 if (gro_vxlan_udp4_reassemble(pkts[i], vxlan_udp_tbl,
403 current_time) < 0)
404 unprocess_pkts[unprocess_num++] = pkts[i];
405 } else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
406 do_tcp4_gro) {
407 if (gro_tcp4_reassemble(pkts[i], tcp_tbl,
408 current_time) < 0)
409 unprocess_pkts[unprocess_num++] = pkts[i];
410 } else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
411 do_udp4_gro) {
412 if (gro_udp4_reassemble(pkts[i], udp_tbl,
413 current_time) < 0)
414 unprocess_pkts[unprocess_num++] = pkts[i];
415 } else if (IS_IPV6_TCP_PKT(pkts[i]->packet_type) &&
416 do_tcp6_gro) {
417 if (gro_tcp6_reassemble(pkts[i], tcp6_tbl,
418 current_time) < 0)
419 unprocess_pkts[unprocess_num++] = pkts[i];
420 } else
421 unprocess_pkts[unprocess_num++] = pkts[i];
422 }
423 if (unprocess_num > 0) {
424 memcpy(pkts, unprocess_pkts, sizeof(struct rte_mbuf *) *
425 unprocess_num);
426 }
427
428 return unprocess_num;
429 }
430
431 uint16_t
rte_gro_timeout_flush(void * ctx,uint64_t timeout_cycles,uint64_t gro_types,struct rte_mbuf ** out,uint16_t max_nb_out)432 rte_gro_timeout_flush(void *ctx,
433 uint64_t timeout_cycles,
434 uint64_t gro_types,
435 struct rte_mbuf **out,
436 uint16_t max_nb_out)
437 {
438 struct gro_ctx *gro_ctx = ctx;
439 uint64_t flush_timestamp;
440 uint16_t num = 0;
441 uint16_t left_nb_out = max_nb_out;
442
443 gro_types = gro_types & gro_ctx->gro_types;
444 flush_timestamp = rte_rdtsc() - timeout_cycles;
445
446 if (gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
447 num = gro_vxlan_tcp4_tbl_timeout_flush(gro_ctx->tbls[
448 RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX],
449 flush_timestamp, out, left_nb_out);
450 left_nb_out = max_nb_out - num;
451 }
452
453 if ((gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) && left_nb_out > 0) {
454 num += gro_vxlan_udp4_tbl_timeout_flush(gro_ctx->tbls[
455 RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX],
456 flush_timestamp, &out[num], left_nb_out);
457 left_nb_out = max_nb_out - num;
458 }
459
460 /* If no available space in 'out', stop flushing. */
461 if ((gro_types & RTE_GRO_TCP_IPV4) && left_nb_out > 0) {
462 num += gro_tcp4_tbl_timeout_flush(
463 gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
464 flush_timestamp,
465 &out[num], left_nb_out);
466 left_nb_out = max_nb_out - num;
467 }
468
469 /* If no available space in 'out', stop flushing. */
470 if ((gro_types & RTE_GRO_UDP_IPV4) && left_nb_out > 0) {
471 num += gro_udp4_tbl_timeout_flush(
472 gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX],
473 flush_timestamp,
474 &out[num], left_nb_out);
475 left_nb_out = max_nb_out - num;
476 }
477
478 if ((gro_types & RTE_GRO_TCP_IPV6) && left_nb_out > 0) {
479 num += gro_tcp6_tbl_timeout_flush(
480 gro_ctx->tbls[RTE_GRO_TCP_IPV6_INDEX],
481 flush_timestamp,
482 &out[num], left_nb_out);
483
484 }
485
486 return num;
487 }
488
489 uint64_t
rte_gro_get_pkt_count(void * ctx)490 rte_gro_get_pkt_count(void *ctx)
491 {
492 struct gro_ctx *gro_ctx = ctx;
493 gro_tbl_pkt_count_fn pkt_count_fn;
494 uint64_t gro_types = gro_ctx->gro_types, flag;
495 uint64_t item_num = 0;
496 uint8_t i;
497
498 for (i = 0; i < RTE_GRO_TYPE_MAX_NUM && gro_types; i++) {
499 flag = 1ULL << i;
500 if ((gro_types & flag) == 0)
501 continue;
502
503 gro_types ^= flag;
504 pkt_count_fn = tbl_pkt_count_fn[i];
505 if (pkt_count_fn)
506 item_num += pkt_count_fn(gro_ctx->tbls[i]);
507 }
508
509 return item_num;
510 }
511