/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#include "gro_tcp6.h"
#include "gro_tcp_internal.h"

void *
gro_tcp6_tbl_create(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow)
{
	struct gro_tcp6_tbl *tbl;
	size_t size;
	uint32_t entries_num, i;

	entries_num = max_flow_num * max_item_per_flow;
	entries_num = RTE_MIN(entries_num, GRO_TCP6_TBL_MAX_ITEM_NUM);

	if (entries_num == 0)
		return NULL;

	tbl = rte_zmalloc_socket(__func__,
			sizeof(struct gro_tcp6_tbl),
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl == NULL)
		return NULL;

	size = sizeof(struct gro_tcp_item) * entries_num;
	tbl->items = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->items == NULL) {
		rte_free(tbl);
		return NULL;
	}
	tbl->max_item_num = entries_num;

	size = sizeof(struct gro_tcp6_flow) * entries_num;
	tbl->flows = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->flows == NULL) {
		rte_free(tbl->items);
		rte_free(tbl);
		return NULL;
	}
	/* INVALID_ARRAY_INDEX indicates an empty flow */
	for (i = 0; i < entries_num; i++)
		tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
	tbl->max_flow_num = entries_num;

	return tbl;
}

void
gro_tcp6_tbl_destroy(void *tbl)
{
	struct gro_tcp6_tbl *tcp_tbl = tbl;

	if (tcp_tbl) {
		rte_free(tcp_tbl->items);
		rte_free(tcp_tbl->flows);
	}
	rte_free(tcp_tbl);
}

static inline uint32_t
find_an_empty_flow(struct gro_tcp6_tbl *tbl)
{
	uint32_t i;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++)
		if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
			return i;
	return INVALID_ARRAY_INDEX;
}

static inline uint32_t
insert_new_flow(struct gro_tcp6_tbl *tbl,
		struct tcp6_flow_key *src,
		uint32_t item_idx)
{
	struct tcp6_flow_key *dst;
	uint32_t flow_idx;

	flow_idx = find_an_empty_flow(tbl);
	if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	dst = &(tbl->flows[flow_idx].key);

	ASSIGN_COMMON_TCP_KEY((&src->cmn_key), (&dst->cmn_key));
	memcpy(&dst->src_addr[0], &src->src_addr[0], sizeof(dst->src_addr));
	memcpy(&dst->dst_addr[0], &src->dst_addr[0], sizeof(dst->dst_addr));
	dst->vtc_flow = src->vtc_flow;

	tbl->flows[flow_idx].start_index = item_idx;
	tbl->flow_num++;

	return flow_idx;
}

/*
 * Update the packet length for the flushed packet.
 */
static inline void
update_header(struct gro_tcp_item *item)
{
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_mbuf *pkt = item->firstseg;

	ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *,
			pkt->l2_len);
	ipv6_hdr->payload_len = rte_cpu_to_be_16(pkt->pkt_len -
			pkt->l2_len - pkt->l3_len);
}

int32_t
gro_tcp6_reassemble(struct rte_mbuf *pkt,
		struct gro_tcp6_tbl *tbl,
		uint64_t start_time)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	int32_t tcp_dl;
	uint16_t ip_tlen;
	struct tcp6_flow_key key;
	uint32_t i, max_flow_num, remaining_flow_num;
	uint32_t sent_seq;
	struct rte_tcp_hdr *tcp_hdr;
	uint8_t find;
	uint32_t item_idx;

	/*
	 * Don't process the packet whose TCP header length is greater
	 * than 60 bytes or less than 20 bytes.
	 */
	if (unlikely(INVALID_TCP_HDRLEN(pkt->l4_len)))
		return -1;

	eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	ipv6_hdr = (struct rte_ipv6_hdr *)((char *)eth_hdr + pkt->l2_len);
	tcp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_tcp_hdr *, pkt->l2_len + pkt->l3_len);

	/*
	 * Don't process the packet that has FIN, SYN, RST, PSH, URG, ECE
	 * or CWR set.
	 */
	if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
		return -1;

	ip_tlen = rte_be_to_cpu_16(ipv6_hdr->payload_len);
	/*
	 * Don't process the packet whose payload length is less than or
	 * equal to 0.
	 */
	tcp_dl = ip_tlen - pkt->l4_len;
	if (tcp_dl <= 0)
		return -1;

	rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.cmn_key.eth_saddr));
	rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.cmn_key.eth_daddr));
	memcpy(&key.src_addr[0], &ipv6_hdr->src_addr, sizeof(key.src_addr));
	memcpy(&key.dst_addr[0], &ipv6_hdr->dst_addr, sizeof(key.dst_addr));
	key.cmn_key.src_port = tcp_hdr->src_port;
	key.cmn_key.dst_port = tcp_hdr->dst_port;
	key.cmn_key.recv_ack = tcp_hdr->recv_ack;
	key.vtc_flow = ipv6_hdr->vtc_flow;

	/* Search for a matching flow. */
	max_flow_num = tbl->max_flow_num;
	remaining_flow_num = tbl->flow_num;
	find = 0;
	for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
			if (is_same_tcp6_flow(&tbl->flows[i].key, &key)) {
				find = 1;
				break;
			}
			remaining_flow_num--;
		}
	}

	if (find == 0) {
		sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
		item_idx = insert_new_tcp_item(pkt, tbl->items, &tbl->item_num,
				tbl->max_item_num, start_time,
				INVALID_ARRAY_INDEX, sent_seq, 0, true);
		if (item_idx == INVALID_ARRAY_INDEX)
			return -1;
		if (insert_new_flow(tbl, &key, item_idx) == INVALID_ARRAY_INDEX) {
			/*
			 * Failed to insert a new flow, so delete the
			 * stored packet.
			 */
			delete_tcp_item(tbl->items, item_idx, &tbl->item_num, INVALID_ARRAY_INDEX);
			return -1;
		}
		return 0;
	}

	return process_tcp_item(pkt, tcp_hdr, tcp_dl, tbl->items, tbl->flows[i].start_index,
			&tbl->item_num, tbl->max_item_num,
			0, true, start_time);
}

uint16_t
gro_tcp6_tbl_timeout_flush(struct gro_tcp6_tbl *tbl,
		uint64_t flush_timestamp,
		struct rte_mbuf **out,
		uint16_t nb_out)
{
	uint16_t k = 0;
	uint32_t i, j;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++) {
		if (unlikely(tbl->flow_num == 0))
			return k;

		j = tbl->flows[i].start_index;
		while (j != INVALID_ARRAY_INDEX) {
			if (tbl->items[j].start_time <= flush_timestamp) {
				out[k++] = tbl->items[j].firstseg;
				if (tbl->items[j].nb_merged > 1)
					update_header(&(tbl->items[j]));
				/*
				 * Delete the packet and get the next
				 * packet in the flow.
				 */
				j = delete_tcp_item(tbl->items, j,
						&tbl->item_num, INVALID_ARRAY_INDEX);
				tbl->flows[i].start_index = j;
				if (j == INVALID_ARRAY_INDEX)
					tbl->flow_num--;

				if (unlikely(k == nb_out))
					return k;
			} else
				/*
				 * The remaining packets in this flow won't
				 * time out. Go on to check other flows.
				 */
				break;
		}
	}
	return k;
}

uint32_t
gro_tcp6_tbl_pkt_count(void *tbl)
{
	struct gro_tcp6_tbl *gro_tbl = tbl;

	if (gro_tbl)
		return gro_tbl->item_num;

	return 0;
}
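
#ifdef GRO_TCP6_USAGE_SKETCH
/*
 * Minimal usage sketch, compiled out by default: it only illustrates how the
 * helpers above fit together. In DPDK these functions are normally driven by
 * the generic rte_gro layer, not called directly. The guard macro, the
 * function name, the table sizes and the caller-supplied 'now' timestamp are
 * assumptions made for this example.
 */
static void
gro_tcp6_usage_sketch(struct rte_mbuf **pkts, uint16_t nb_pkts, uint64_t now)
{
	struct gro_tcp6_tbl *tbl;
	struct rte_mbuf *flushed[32];
	uint16_t i;

	/* Example sizing: up to 64 flows, 4 queued packets per flow. */
	tbl = gro_tcp6_tbl_create(0 /* NUMA socket */, 64, 4);
	if (tbl == NULL)
		return;

	for (i = 0; i < nb_pkts; i++) {
		/* pkt->l2_len/l3_len/l4_len must already be set by the caller. */
		if (gro_tcp6_reassemble(pkts[i], tbl, now) < 0) {
			/*
			 * Packet can't be merged (bad header, flags other
			 * than ACK, no payload, or table full): deliver it
			 * unmerged.
			 */
		}
	}

	/* Hand back every packet queued at or before 'now'. */
	gro_tcp6_tbl_timeout_flush(tbl, now, flushed, RTE_DIM(flushed));

	gro_tcp6_tbl_destroy(tbl);
}
#endif /* GRO_TCP6_USAGE_SKETCH */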