/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#include "gro_tcp4.h"
#include "gro_tcp_internal.h"

void *
gro_tcp4_tbl_create(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow)
{
	struct gro_tcp4_tbl *tbl;
	size_t size;
	uint32_t entries_num, i;

	entries_num = max_flow_num * max_item_per_flow;
	entries_num = RTE_MIN(entries_num, GRO_TCP4_TBL_MAX_ITEM_NUM);

	if (entries_num == 0)
		return NULL;

	tbl = rte_zmalloc_socket(__func__,
			sizeof(struct gro_tcp4_tbl),
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl == NULL)
		return NULL;

	size = sizeof(struct gro_tcp_item) * entries_num;
	tbl->items = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->items == NULL) {
		rte_free(tbl);
		return NULL;
	}
	tbl->max_item_num = entries_num;

	size = sizeof(struct gro_tcp4_flow) * entries_num;
	tbl->flows = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->flows == NULL) {
		rte_free(tbl->items);
		rte_free(tbl);
		return NULL;
	}
	/* INVALID_ARRAY_INDEX indicates an empty flow */
	for (i = 0; i < entries_num; i++)
		tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
	tbl->max_flow_num = entries_num;

	return tbl;
}

void
gro_tcp4_tbl_destroy(void *tbl)
{
	struct gro_tcp4_tbl *tcp_tbl = tbl;

	if (tcp_tbl) {
		rte_free(tcp_tbl->items);
		rte_free(tcp_tbl->flows);
	}
	rte_free(tcp_tbl);
}

static inline uint32_t
find_an_empty_flow(struct gro_tcp4_tbl *tbl)
{
	uint32_t i;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++)
		if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
			return i;
	return INVALID_ARRAY_INDEX;
}

static inline uint32_t
insert_new_flow(struct gro_tcp4_tbl *tbl,
		struct tcp4_flow_key *src,
		uint32_t item_idx)
{
	struct tcp4_flow_key *dst;
	uint32_t flow_idx;

	flow_idx = find_an_empty_flow(tbl);
	if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	dst = &(tbl->flows[flow_idx].key);

	ASSIGN_COMMON_TCP_KEY((&src->cmn_key), (&dst->cmn_key));

	dst->ip_src_addr = src->ip_src_addr;
	dst->ip_dst_addr = src->ip_dst_addr;

	tbl->flows[flow_idx].start_index = item_idx;
	tbl->flow_num++;

	return flow_idx;
}
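/*
 * Reassemble an incoming TCP/IPv4 packet. The caller must have filled in
 * the mbuf's l2_len, l3_len and l4_len fields. Returns a positive value
 * if the packet is merged into a stored packet, 0 if it is stored as a
 * new item, and -1 if it cannot be processed by GRO and should be
 * forwarded to the application unchanged.
 */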
int32_t
gro_tcp4_reassemble(struct rte_mbuf *pkt,
		struct gro_tcp4_tbl *tbl,
		uint64_t start_time)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	uint32_t sent_seq;
	int32_t tcp_dl;
	uint16_t ip_id, hdr_len, frag_off, ip_tlen;
	uint8_t is_atomic;

	struct tcp4_flow_key key;
	uint32_t item_idx;
	uint32_t i, max_flow_num, remaining_flow_num;
	uint8_t find;

	/*
	 * Don't process the packet whose TCP header length is greater
	 * than 60 bytes or less than 20 bytes.
	 */
	if (unlikely(INVALID_TCP_HDRLEN(pkt->l4_len)))
		return -1;

	eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
	tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
	hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;

	/*
	 * Don't process the packet which has FIN, SYN, RST, PSH, URG, ECE
	 * or CWR set.
	 */
	if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
		return -1;

	/* Trim the tail padding bytes. */
	ip_tlen = rte_be_to_cpu_16(ipv4_hdr->total_length);
	if (pkt->pkt_len > (uint32_t)(ip_tlen + pkt->l2_len))
		rte_pktmbuf_trim(pkt, pkt->pkt_len - ip_tlen - pkt->l2_len);

	/*
	 * Don't process the packet whose payload length is less than or
	 * equal to 0.
	 */
	tcp_dl = pkt->pkt_len - hdr_len;
	if (tcp_dl <= 0)
		return -1;

	rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.cmn_key.eth_saddr));
	rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.cmn_key.eth_daddr));
	key.ip_src_addr = ipv4_hdr->src_addr;
	key.ip_dst_addr = ipv4_hdr->dst_addr;
	key.cmn_key.src_port = tcp_hdr->src_port;
	key.cmn_key.dst_port = tcp_hdr->dst_port;
	key.cmn_key.recv_ack = tcp_hdr->recv_ack;

	/*
	 * Save IPv4 ID for the packet whose DF bit is 0. For the packet
	 * whose DF bit is 1, IPv4 ID is ignored.
	 */
	frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
	is_atomic = (frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;
	ip_id = is_atomic ? 0 : rte_be_to_cpu_16(ipv4_hdr->packet_id);

	/* Search for a matched flow. */
	max_flow_num = tbl->max_flow_num;
	remaining_flow_num = tbl->flow_num;
	find = 0;
	for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
			if (is_same_tcp4_flow(tbl->flows[i].key, key)) {
				find = 1;
				break;
			}
			remaining_flow_num--;
		}
	}

	if (find == 0) {
		sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
		item_idx = insert_new_tcp_item(pkt, tbl->items, &tbl->item_num,
						tbl->max_item_num, start_time,
						INVALID_ARRAY_INDEX, sent_seq, ip_id,
						is_atomic);
		if (item_idx == INVALID_ARRAY_INDEX)
			return -1;
		if (insert_new_flow(tbl, &key, item_idx) ==
			INVALID_ARRAY_INDEX) {
			/*
			 * Failed to insert a new flow, so delete the
			 * stored packet.
			 */
			delete_tcp_item(tbl->items, item_idx, &tbl->item_num,
					INVALID_ARRAY_INDEX);
			return -1;
		}
		return 0;
	}

	return process_tcp_item(pkt, tcp_hdr, tcp_dl, tbl->items,
			tbl->flows[i].start_index,
			&tbl->item_num, tbl->max_item_num,
			ip_id, is_atomic, start_time);
}

/*
 * Update the IPv4 total length of the flushed packet.
 */
static inline void
update_header(struct gro_tcp_item *item)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_mbuf *pkt = item->firstseg;

	ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			pkt->l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -
			pkt->l2_len);
}
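/*
 * Flush timed-out packets: for every flow, emit into 'out' (up to nb_out
 * entries) the stored packets whose start_time is not later than
 * flush_timestamp, and return the number of packets emitted.
 */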
uint16_t
gro_tcp4_tbl_timeout_flush(struct gro_tcp4_tbl *tbl,
		uint64_t flush_timestamp,
		struct rte_mbuf **out,
		uint16_t nb_out)
{
	uint16_t k = 0;
	uint32_t i, j;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++) {
		if (unlikely(tbl->flow_num == 0))
			return k;

		j = tbl->flows[i].start_index;
		while (j != INVALID_ARRAY_INDEX) {
			if (tbl->items[j].start_time <= flush_timestamp) {
				out[k++] = tbl->items[j].firstseg;
				if (tbl->items[j].nb_merged > 1)
					update_header(&(tbl->items[j]));
				/*
				 * Delete the packet and get the next
				 * packet in the flow.
				 */
				j = delete_tcp_item(tbl->items, j,
						&tbl->item_num, INVALID_ARRAY_INDEX);
				tbl->flows[i].start_index = j;
				if (j == INVALID_ARRAY_INDEX)
					tbl->flow_num--;

				if (unlikely(k == nb_out))
					return k;
			} else
				/*
				 * The remaining packets in this flow are
				 * not timed out. Go on to check other flows.
				 */
				break;
		}
	}
	return k;
}

uint32_t
gro_tcp4_tbl_pkt_count(void *tbl)
{
	struct gro_tcp4_tbl *gro_tbl = tbl;

	if (gro_tbl)
		return gro_tbl->item_num;

	return 0;
}
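/*
 * Usage sketch (illustrative only, not part of this file): how the table
 * API above fits together in a polling loop. The guard macro, the helper
 * name and the timeout policy are hypothetical; real applications normally
 * drive these entry points through the rte_gro library rather than calling
 * them directly.
 */
#ifdef GRO_TCP4_USAGE_SKETCH
#include <rte_cycles.h>

static uint16_t
tcp4_gro_poll(struct gro_tcp4_tbl *tbl,
		struct rte_mbuf **pkts, uint16_t nb_pkts,
		struct rte_mbuf **out, uint16_t nb_out,
		uint64_t timeout_cycles)
{
	/* Assumes nb_out >= nb_pkts so unprocessed packets always fit. */
	uint64_t now = rte_rdtsc();
	uint16_t i, num = 0;

	for (i = 0; i < nb_pkts; i++) {
		/*
		 * Each pkts[i] must have l2_len/l3_len/l4_len filled in.
		 * <0: not GRO-able, hand the packet back unchanged;
		 *  0: stored in the table; >0: merged into a stored packet.
		 */
		if (gro_tcp4_reassemble(pkts[i], tbl, now) < 0)
			out[num++] = pkts[i];
	}

	/* Emit the packets that have stayed in the table too long. */
	num += gro_tcp4_tbl_timeout_flush(tbl, now - timeout_cycles,
			&out[num], nb_out - num);
	return num;
}
#endif /* GRO_TCP4_USAGE_SKETCH */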