/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */

#ifndef _GRO_TCP_INTERNAL_H_
#define _GRO_TCP_INTERNAL_H_
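
/*
 * Common helpers for the TCP GRO item table, shared by the IPv4 and IPv6
 * TCP reassembly paths. They assume struct gro_tcp_item, INVALID_ARRAY_INDEX,
 * check_seq_option() and merge_two_tcp_packets() are visible to the
 * including file (see gro_tcp.h).
 */

/*
 * Return the index of the first free slot in the item table (a slot is
 * free when its firstseg pointer is NULL), or INVALID_ARRAY_INDEX if the
 * table is full.
 */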
static inline uint32_t
find_an_empty_item(struct gro_tcp_item *items,
		uint32_t max_item_num)
{
	uint32_t i;

	for (i = 0; i < max_item_num; i++)
		if (items[i].firstseg == NULL)
			return i;
	return INVALID_ARRAY_INDEX;
}
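
/*
 * Store a packet in a free item: record the mbuf chain, TCP sequence
 * number, IP ID and start time, and link the new item after prev_idx in
 * the flow's item list. Returns the index of the new item, or
 * INVALID_ARRAY_INDEX if no free item is available.
 */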
static inline uint32_t
insert_new_tcp_item(struct rte_mbuf *pkt,
		struct gro_tcp_item *items,
		uint32_t *item_num,
		uint32_t max_item_num,
		uint64_t start_time,
		uint32_t prev_idx,
		uint32_t sent_seq,
		uint16_t ip_id,
		uint8_t is_atomic)
{
	uint32_t item_idx;

	item_idx = find_an_empty_item(items, max_item_num);
	if (item_idx == INVALID_ARRAY_INDEX)
		return INVALID_ARRAY_INDEX;

	items[item_idx].firstseg = pkt;
	items[item_idx].lastseg = rte_pktmbuf_lastseg(pkt);
	items[item_idx].start_time = start_time;
	items[item_idx].next_pkt_idx = INVALID_ARRAY_INDEX;
	items[item_idx].sent_seq = sent_seq;
	items[item_idx].l3.ip_id = ip_id;
	items[item_idx].nb_merged = 1;
	items[item_idx].is_atomic = is_atomic;
	(*item_num) += 1;

	/* if the previous packet exists, chain them together. */
	if (prev_idx != INVALID_ARRAY_INDEX) {
		items[item_idx].next_pkt_idx =
			items[prev_idx].next_pkt_idx;
		items[prev_idx].next_pkt_idx = item_idx;
	}

	return item_idx;
}
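
/*
 * Free the item at item_idx (marking it empty by clearing firstseg) and
 * unlink it from its flow's item list. Returns the index of the next item
 * in the list.
 */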
static inline uint32_t
delete_tcp_item(struct gro_tcp_item *items, uint32_t item_idx,
		uint32_t *item_num,
		uint32_t prev_item_idx)
{
	uint32_t next_idx = items[item_idx].next_pkt_idx;

	/* NULL indicates an empty item */
	items[item_idx].firstseg = NULL;
	(*item_num) -= 1;
	if (prev_item_idx != INVALID_ARRAY_INDEX)
		items[prev_item_idx].next_pkt_idx = next_idx;

	return next_idx;
}
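
/*
 * Walk the flow's item list and use check_seq_option() to find a neighbor
 * for the input packet; if one is found, try to merge the two packets with
 * merge_two_tcp_packets(). Returns 1 if the packet is merged into an
 * existing item, 0 if it is stored as a new item, and -1 if it cannot be
 * stored because the item table is full.
 */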
static inline int32_t
process_tcp_item(struct rte_mbuf *pkt,
		struct rte_tcp_hdr *tcp_hdr,
		int32_t tcp_dl,
		struct gro_tcp_item *items,
		uint32_t item_idx,
		uint32_t *item_num,
		uint32_t max_item_num,
		uint16_t ip_id,
		uint8_t is_atomic,
		uint64_t start_time)
{
	uint32_t cur_idx;
	uint32_t prev_idx;
	int cmp;
	uint32_t sent_seq;

	sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
	/*
	 * Check all packets in the flow and try to find a neighbor for
	 * the input packet.
	 */
	cur_idx = item_idx;
	prev_idx = cur_idx;
	do {
		cmp = check_seq_option(&items[cur_idx], tcp_hdr,
				sent_seq, ip_id, pkt->l4_len, tcp_dl, 0,
				is_atomic);
		if (cmp) {
			if (merge_two_tcp_packets(&items[cur_idx],
					pkt, cmp, sent_seq, tcp_hdr->tcp_flags, ip_id, 0))
				return 1;
			/*
			 * The two packets could not be merged, as the
			 * merged packet length would exceed the maximum.
			 * Store the packet in the flow instead.
			 */
			if (insert_new_tcp_item(pkt, items, item_num, max_item_num,
					start_time, cur_idx, sent_seq, ip_id, is_atomic) ==
					INVALID_ARRAY_INDEX)
				return -1;
			return 0;
		}
		prev_idx = cur_idx;
		cur_idx = items[cur_idx].next_pkt_idx;
	} while (cur_idx != INVALID_ARRAY_INDEX);

	/* No neighbor was found, so store the packet in the flow. */
	if (insert_new_tcp_item(pkt, items, item_num, max_item_num, start_time, prev_idx, sent_seq,
			ip_id, is_atomic) == INVALID_ARRAY_INDEX)
		return -1;

	return 0;
}

#endif