/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_udp.h>

#include "gro_vxlan_tcp4.h"

void *
gro_vxlan_tcp4_tbl_create(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow)
{
	struct gro_vxlan_tcp4_tbl *tbl;
	size_t size;
	uint32_t entries_num, i;

	entries_num = max_flow_num * max_item_per_flow;
	entries_num = RTE_MIN(entries_num, GRO_VXLAN_TCP4_TBL_MAX_ITEM_NUM);

	if (entries_num == 0)
		return NULL;

	tbl = rte_zmalloc_socket(__func__,
			sizeof(struct gro_vxlan_tcp4_tbl),
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl == NULL)
		return NULL;

	size = sizeof(struct gro_vxlan_tcp4_item) * entries_num;
	tbl->items = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->items == NULL) {
		rte_free(tbl);
		return NULL;
	}
	tbl->max_item_num = entries_num;

	size = sizeof(struct gro_vxlan_tcp4_flow) * entries_num;
	tbl->flows = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->flows == NULL) {
		rte_free(tbl->items);
		rte_free(tbl);
		return NULL;
	}

	for (i = 0; i < entries_num; i++)
		tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
	tbl->max_flow_num = entries_num;

	return tbl;
}

void
gro_vxlan_tcp4_tbl_destroy(void *tbl)
{
	struct gro_vxlan_tcp4_tbl *vxlan_tbl = tbl;

	if (vxlan_tbl) {
		rte_free(vxlan_tbl->items);
		rte_free(vxlan_tbl->flows);
	}
	rte_free(vxlan_tbl);
}

static inline uint32_t
find_an_empty_item(struct gro_vxlan_tcp4_tbl *tbl)
{
	uint32_t max_item_num = tbl->max_item_num, i;

	for (i = 0; i < max_item_num; i++)
		if (tbl->items[i].inner_item.firstseg == NULL)
			return i;
	return INVALID_ARRAY_INDEX;
}

static inline uint32_t
find_an_empty_flow(struct gro_vxlan_tcp4_tbl *tbl)
{
	uint32_t max_flow_num = tbl->max_flow_num, i;

	for (i = 0; i < max_flow_num; i++)
		if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
			return i;
	return INVALID_ARRAY_INDEX;
}

static inline uint32_t
insert_new_item(struct gro_vxlan_tcp4_tbl *tbl,
		struct rte_mbuf *pkt,
		uint64_t start_time,
		uint32_t prev_idx,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id,
		uint8_t outer_is_atomic,
		uint8_t is_atomic)
{
	uint32_t item_idx;

	item_idx = find_an_empty_item(tbl);
	if (unlikely(item_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	tbl->items[item_idx].inner_item.firstseg = pkt;
	tbl->items[item_idx].inner_item.lastseg = rte_pktmbuf_lastseg(pkt);
	tbl->items[item_idx].inner_item.start_time = start_time;
	tbl->items[item_idx].inner_item.next_pkt_idx = INVALID_ARRAY_INDEX;
	tbl->items[item_idx].inner_item.sent_seq = sent_seq;
	tbl->items[item_idx].inner_item.l3.ip_id = ip_id;
	tbl->items[item_idx].inner_item.nb_merged = 1;
	tbl->items[item_idx].inner_item.is_atomic = is_atomic;
	tbl->items[item_idx].outer_ip_id = outer_ip_id;
	tbl->items[item_idx].outer_is_atomic = outer_is_atomic;
	tbl->item_num++;

	/* If the previous packet exists, chain the new one with it. */
	if (prev_idx != INVALID_ARRAY_INDEX) {
		tbl->items[item_idx].inner_item.next_pkt_idx =
			tbl->items[prev_idx].inner_item.next_pkt_idx;
		tbl->items[prev_idx].inner_item.next_pkt_idx = item_idx;
	}

	return item_idx;
}
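/*
 * Remove the item at item_idx from its packet chain and mark the slot
 * as free. If prev_item_idx is valid, the previous item is re-linked to
 * the removed item's successor. Returns the index of the next item in
 * the chain.
 */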
static inline uint32_t
delete_item(struct gro_vxlan_tcp4_tbl *tbl,
		uint32_t item_idx,
		uint32_t prev_item_idx)
{
	uint32_t next_idx = tbl->items[item_idx].inner_item.next_pkt_idx;

	/* NULL indicates an empty item. */
	tbl->items[item_idx].inner_item.firstseg = NULL;
	tbl->item_num--;
	if (prev_item_idx != INVALID_ARRAY_INDEX)
		tbl->items[prev_item_idx].inner_item.next_pkt_idx = next_idx;

	return next_idx;
}

static inline uint32_t
insert_new_flow(struct gro_vxlan_tcp4_tbl *tbl,
		struct vxlan_tcp4_flow_key *src,
		uint32_t item_idx)
{
	struct vxlan_tcp4_flow_key *dst;
	uint32_t flow_idx;

	flow_idx = find_an_empty_flow(tbl);
	if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	dst = &(tbl->flows[flow_idx].key);

	ASSIGN_COMMON_TCP_KEY((&(src->inner_key.cmn_key)), (&(dst->inner_key.cmn_key)));
	dst->inner_key.ip_src_addr = src->inner_key.ip_src_addr;
	dst->inner_key.ip_dst_addr = src->inner_key.ip_dst_addr;

	dst->vxlan_hdr.vx_flags = src->vxlan_hdr.vx_flags;
	dst->vxlan_hdr.vx_vni = src->vxlan_hdr.vx_vni;
	rte_ether_addr_copy(&(src->outer_eth_saddr), &(dst->outer_eth_saddr));
	rte_ether_addr_copy(&(src->outer_eth_daddr), &(dst->outer_eth_daddr));
	dst->outer_ip_src_addr = src->outer_ip_src_addr;
	dst->outer_ip_dst_addr = src->outer_ip_dst_addr;
	dst->outer_src_port = src->outer_src_port;
	dst->outer_dst_port = src->outer_dst_port;

	tbl->flows[flow_idx].start_index = item_idx;
	tbl->flow_num++;

	return flow_idx;
}

static inline int
is_same_vxlan_tcp4_flow(struct vxlan_tcp4_flow_key k1,
		struct vxlan_tcp4_flow_key k2)
{
	return (rte_is_same_ether_addr(&k1.outer_eth_saddr,
					&k2.outer_eth_saddr) &&
			rte_is_same_ether_addr(&k1.outer_eth_daddr,
					&k2.outer_eth_daddr) &&
			(k1.outer_ip_src_addr == k2.outer_ip_src_addr) &&
			(k1.outer_ip_dst_addr == k2.outer_ip_dst_addr) &&
			(k1.outer_src_port == k2.outer_src_port) &&
			(k1.outer_dst_port == k2.outer_dst_port) &&
			(k1.vxlan_hdr.vx_flags == k2.vxlan_hdr.vx_flags) &&
			(k1.vxlan_hdr.vx_vni == k2.vxlan_hdr.vx_vni) &&
			is_same_tcp4_flow(k1.inner_key, k2.inner_key));
}
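/*
 * Check whether the packet is a neighbor of the given item, i.e.
 * whether the two can be merged. Returns 1 if the packet can be
 * appended after the item, -1 if it can be prepended before it, and 0
 * if they can't be merged. When the outer header is not atomic (DF bit
 * cleared), the outer IPv4 ID must also be contiguous with the item's.
 */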
static inline int
check_vxlan_seq_option(struct gro_vxlan_tcp4_item *item,
		struct rte_tcp_hdr *tcp_hdr,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id,
		uint16_t tcp_hl,
		uint16_t tcp_dl,
		uint8_t outer_is_atomic,
		uint8_t is_atomic)
{
	struct rte_mbuf *pkt = item->inner_item.firstseg;
	int cmp;
	uint16_t l2_offset;

	/* Don't merge packets whose outer DF bits are different. */
	if (unlikely(item->outer_is_atomic ^ outer_is_atomic))
		return 0;

	l2_offset = pkt->outer_l2_len + pkt->outer_l3_len;
	cmp = check_seq_option(&item->inner_item, tcp_hdr, sent_seq, ip_id,
			tcp_hl, tcp_dl, l2_offset, is_atomic);
	if ((cmp > 0) && (outer_is_atomic ||
				(outer_ip_id == item->outer_ip_id + 1)))
		/* Append the new packet. */
		return 1;
	else if ((cmp < 0) && (outer_is_atomic ||
				(outer_ip_id + item->inner_item.nb_merged ==
				 item->outer_ip_id)))
		/* Prepend the new packet. */
		return -1;

	return 0;
}

static inline int
merge_two_vxlan_tcp4_packets(struct gro_vxlan_tcp4_item *item,
		struct rte_mbuf *pkt,
		int cmp,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id)
{
	if (merge_two_tcp_packets(&item->inner_item, pkt, cmp, sent_seq,
				ip_id, pkt->outer_l2_len +
				pkt->outer_l3_len)) {
		/* Update the outer IPv4 ID to the larger value. */
		item->outer_ip_id = cmp > 0 ? outer_ip_id : item->outer_ip_id;
		return 1;
	}

	return 0;
}

static inline void
update_vxlan_header(struct gro_vxlan_tcp4_item *item)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_udp_hdr *udp_hdr;
	struct rte_mbuf *pkt = item->inner_item.firstseg;
	uint16_t len;

	/* Update the outer IPv4 header. */
	len = pkt->pkt_len - pkt->outer_l2_len;
	ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			pkt->outer_l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(len);

	/* Update the outer UDP header. */
	len -= pkt->outer_l3_len;
	udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + pkt->outer_l3_len);
	udp_hdr->dgram_len = rte_cpu_to_be_16(len);

	/* Update the inner IPv4 header. */
	len -= pkt->l2_len;
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(len);
}
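/*
 * Try to reassemble the given VxLAN/TCP/IPv4 packet: parse the outer
 * and inner headers, look up the flow table with the outer and inner
 * keys, and either merge the packet with a neighbor in the matching
 * flow or store it in the table.
 *
 * Returns 1 if the packet is merged into an existing packet, 0 if it
 * is stored in the table, and -1 if the packet can't be processed by
 * GRO (e.g. unexpected TCP flags or no payload).
 */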
int32_t
gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,
		struct gro_vxlan_tcp4_tbl *tbl,
		uint64_t start_time)
{
	struct rte_ether_hdr *outer_eth_hdr, *eth_hdr;
	struct rte_ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	struct rte_udp_hdr *udp_hdr;
	struct rte_vxlan_hdr *vxlan_hdr;
	uint32_t sent_seq;
	int32_t tcp_dl;
	uint16_t frag_off, outer_ip_id, ip_id;
	uint8_t outer_is_atomic, is_atomic;

	struct vxlan_tcp4_flow_key key;
	uint32_t cur_idx, prev_idx, item_idx;
	uint32_t i, max_flow_num, remaining_flow_num;
	int cmp;
	uint16_t hdr_len;
	uint8_t find;

	/*
	 * Don't process the packet whose TCP header length is greater
	 * than 60 bytes or less than 20 bytes.
	 */
	if (unlikely(INVALID_TCP_HDRLEN(pkt->l4_len)))
		return -1;

	outer_eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	outer_ipv4_hdr = (struct rte_ipv4_hdr *)((char *)outer_eth_hdr +
			pkt->outer_l2_len);
	udp_hdr = (struct rte_udp_hdr *)((char *)outer_ipv4_hdr +
			pkt->outer_l3_len);
	vxlan_hdr = (struct rte_vxlan_hdr *)((char *)udp_hdr +
			sizeof(struct rte_udp_hdr));
	eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_hdr +
			sizeof(struct rte_vxlan_hdr));
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
	tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);

	/*
	 * Don't process the packet which has FIN, SYN, RST, PSH, URG,
	 * ECE or CWR set.
	 */
	if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
		return -1;

	hdr_len = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len +
		pkt->l3_len + pkt->l4_len;
	/*
	 * Don't process the packet whose payload length is less than or
	 * equal to 0.
	 */
	tcp_dl = pkt->pkt_len - hdr_len;
	if (tcp_dl <= 0)
		return -1;

	/*
	 * Save the IPv4 ID for the packet whose DF bit is 0. For the
	 * packet whose DF bit is 1, the IPv4 ID is ignored.
	 */
	frag_off = rte_be_to_cpu_16(outer_ipv4_hdr->fragment_offset);
	outer_is_atomic =
		(frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;
	outer_ip_id = outer_is_atomic ? 0 :
		rte_be_to_cpu_16(outer_ipv4_hdr->packet_id);
	frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
	is_atomic = (frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;
	ip_id = is_atomic ? 0 : rte_be_to_cpu_16(ipv4_hdr->packet_id);

	sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);

	rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.inner_key.cmn_key.eth_saddr));
	rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.inner_key.cmn_key.eth_daddr));
	key.inner_key.ip_src_addr = ipv4_hdr->src_addr;
	key.inner_key.ip_dst_addr = ipv4_hdr->dst_addr;
	key.inner_key.cmn_key.recv_ack = tcp_hdr->recv_ack;
	key.inner_key.cmn_key.src_port = tcp_hdr->src_port;
	key.inner_key.cmn_key.dst_port = tcp_hdr->dst_port;

	key.vxlan_hdr.vx_flags = vxlan_hdr->vx_flags;
	key.vxlan_hdr.vx_vni = vxlan_hdr->vx_vni;
	rte_ether_addr_copy(&(outer_eth_hdr->src_addr), &(key.outer_eth_saddr));
	rte_ether_addr_copy(&(outer_eth_hdr->dst_addr), &(key.outer_eth_daddr));
	key.outer_ip_src_addr = outer_ipv4_hdr->src_addr;
	key.outer_ip_dst_addr = outer_ipv4_hdr->dst_addr;
	key.outer_src_port = udp_hdr->src_port;
	key.outer_dst_port = udp_hdr->dst_port;

	/* Search for a matching flow. */
	max_flow_num = tbl->max_flow_num;
	remaining_flow_num = tbl->flow_num;
	find = 0;
	for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
			if (is_same_vxlan_tcp4_flow(tbl->flows[i].key, key)) {
				find = 1;
				break;
			}
			remaining_flow_num--;
		}
	}

	/*
	 * Can't find a matching flow. Insert a new flow and store the
	 * packet into the flow.
	 */
	if (find == 0) {
		item_idx = insert_new_item(tbl, pkt, start_time,
				INVALID_ARRAY_INDEX, sent_seq, outer_ip_id,
				ip_id, outer_is_atomic, is_atomic);
		if (item_idx == INVALID_ARRAY_INDEX)
			return -1;
		if (insert_new_flow(tbl, &key, item_idx) ==
				INVALID_ARRAY_INDEX) {
			/*
			 * Failed to insert a new flow, so delete
			 * the packet that was just inserted.
			 */
			delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);
			return -1;
		}
		return 0;
	}

	/* Check all packets in the flow and try to find a neighbor. */
	cur_idx = tbl->flows[i].start_index;
	prev_idx = cur_idx;
	do {
		cmp = check_vxlan_seq_option(&(tbl->items[cur_idx]), tcp_hdr,
				sent_seq, outer_ip_id, ip_id, pkt->l4_len,
				tcp_dl, outer_is_atomic, is_atomic);
		if (cmp) {
			if (merge_two_vxlan_tcp4_packets(&(tbl->items[cur_idx]),
						pkt, cmp, sent_seq,
						outer_ip_id, ip_id))
				return 1;
			/*
			 * Can't merge the two packets, as the merged
			 * packet would exceed the maximum length.
			 * Insert the packet into the flow.
			 */
			if (insert_new_item(tbl, pkt, start_time, prev_idx,
						sent_seq, outer_ip_id,
						ip_id, outer_is_atomic,
						is_atomic) ==
					INVALID_ARRAY_INDEX)
				return -1;
			return 0;
		}
		prev_idx = cur_idx;
		cur_idx = tbl->items[cur_idx].inner_item.next_pkt_idx;
	} while (cur_idx != INVALID_ARRAY_INDEX);

	/* Can't find a neighbor. Insert the packet into the flow. */
	if (insert_new_item(tbl, pkt, start_time, prev_idx, sent_seq,
				outer_ip_id, ip_id, outer_is_atomic,
				is_atomic) == INVALID_ARRAY_INDEX)
		return -1;

	return 0;
}
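/*
 * Flush all packets whose start_time is not after flush_timestamp:
 * update the outer and inner headers of merged packets, write the
 * mbufs to out[] (at most nb_out entries) and free their table
 * entries. Returns the number of flushed packets.
 */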
uint16_t
gro_vxlan_tcp4_tbl_timeout_flush(struct gro_vxlan_tcp4_tbl *tbl,
		uint64_t flush_timestamp,
		struct rte_mbuf **out,
		uint16_t nb_out)
{
	uint16_t k = 0;
	uint32_t i, j;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++) {
		if (unlikely(tbl->flow_num == 0))
			return k;

		j = tbl->flows[i].start_index;
		while (j != INVALID_ARRAY_INDEX) {
			if (tbl->items[j].inner_item.start_time <=
					flush_timestamp) {
				out[k++] = tbl->items[j].inner_item.firstseg;
				if (tbl->items[j].inner_item.nb_merged > 1)
					update_vxlan_header(&(tbl->items[j]));
				/*
				 * Delete the item and get the next packet
				 * index.
				 */
				j = delete_item(tbl, j, INVALID_ARRAY_INDEX);
				tbl->flows[i].start_index = j;
				if (j == INVALID_ARRAY_INDEX)
					tbl->flow_num--;

				if (unlikely(k == nb_out))
					return k;
			} else
				/*
				 * The remaining packets in the flow haven't
				 * timed out. Go on to check other flows.
				 */
				break;
		}
	}
	return k;
}

uint32_t
gro_vxlan_tcp4_tbl_pkt_count(void *tbl)
{
	struct gro_vxlan_tcp4_tbl *gro_tbl = tbl;

	if (gro_tbl)
		return gro_tbl->item_num;

	return 0;
}
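#ifdef GRO_VXLAN_TCP4_USAGE_EXAMPLE
/*
 * Illustrative sketch only, not part of the library: it shows how the
 * table API above fits together (create, reassemble per packet, flush,
 * destroy). Applications normally drive this code through the public
 * rte_gro API instead of calling these functions directly. The function
 * name, the guard macro and the parameter choices below are assumptions
 * made for the example; it also assumes nb_out >= nb_pkts and that the
 * outer_l2_len/outer_l3_len/l2_len/l3_len/l4_len fields of each mbuf
 * are already set.
 */
#include <rte_cycles.h>
#include <rte_lcore.h>

static uint16_t
example_gro_vxlan_tcp4_burst(struct rte_mbuf **pkts, uint16_t nb_pkts,
		struct rte_mbuf **out, uint16_t nb_out)
{
	void *tbl;
	uint64_t now;
	uint16_t i, nb_flushed = 0;

	/* At most one flow per packet, one item per flow. */
	tbl = gro_vxlan_tcp4_tbl_create(rte_socket_id(), nb_pkts, 1);
	if (tbl == NULL)
		return 0;

	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		/* Unprocessable packets are handed back unchanged. */
		if (gro_vxlan_tcp4_reassemble(pkts[i], tbl, now) < 0)
			out[nb_flushed++] = pkts[i];
	}

	/* Flush everything that was inserted or merged above. */
	nb_flushed += gro_vxlan_tcp4_tbl_timeout_flush(tbl, now,
			&out[nb_flushed], nb_out - nb_flushed);

	gro_vxlan_tcp4_tbl_destroy(tbl);

	return nb_flushed;
}
#endif /* GRO_VXLAN_TCP4_USAGE_EXAMPLE */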