/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_udp.h>

#include "gro_vxlan_tcp4.h"

void *
gro_vxlan_tcp4_tbl_create(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow)
{
	struct gro_vxlan_tcp4_tbl *tbl;
	size_t size;
	uint32_t entries_num, i;

	entries_num = max_flow_num * max_item_per_flow;
	entries_num = RTE_MIN(entries_num, GRO_VXLAN_TCP4_TBL_MAX_ITEM_NUM);

	if (entries_num == 0)
		return NULL;

	tbl = rte_zmalloc_socket(__func__,
			sizeof(struct gro_vxlan_tcp4_tbl),
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl == NULL)
		return NULL;

	size = sizeof(struct gro_vxlan_tcp4_item) * entries_num;
	tbl->items = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->items == NULL) {
		rte_free(tbl);
		return NULL;
	}
	tbl->max_item_num = entries_num;

	size = sizeof(struct gro_vxlan_tcp4_flow) * entries_num;
	tbl->flows = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->flows == NULL) {
		rte_free(tbl->items);
		rte_free(tbl);
		return NULL;
	}

	for (i = 0; i < entries_num; i++)
		tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
	tbl->max_flow_num = entries_num;

	return tbl;
}

void
gro_vxlan_tcp4_tbl_destroy(void *tbl)
{
	struct gro_vxlan_tcp4_tbl *vxlan_tbl = tbl;

	if (vxlan_tbl) {
		rte_free(vxlan_tbl->items);
		rte_free(vxlan_tbl->flows);
	}
	rte_free(vxlan_tbl);
}

static inline uint32_t
find_an_empty_item(struct gro_vxlan_tcp4_tbl *tbl)
{
	uint32_t max_item_num = tbl->max_item_num, i;

	for (i = 0; i < max_item_num; i++)
		if (tbl->items[i].inner_item.firstseg == NULL)
			return i;
	return INVALID_ARRAY_INDEX;
}

static inline uint32_t
find_an_empty_flow(struct gro_vxlan_tcp4_tbl *tbl)
{
	uint32_t max_flow_num = tbl->max_flow_num, i;

	for (i = 0; i < max_flow_num; i++)
		if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
			return i;
	return INVALID_ARRAY_INDEX;
}

static inline uint32_t
insert_new_item(struct gro_vxlan_tcp4_tbl *tbl,
		struct rte_mbuf *pkt,
		uint64_t start_time,
		uint32_t prev_idx,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id,
		uint8_t outer_is_atomic,
		uint8_t is_atomic)
{
	uint32_t item_idx;

	item_idx = find_an_empty_item(tbl);
	if (unlikely(item_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	tbl->items[item_idx].inner_item.firstseg = pkt;
	tbl->items[item_idx].inner_item.lastseg = rte_pktmbuf_lastseg(pkt);
	tbl->items[item_idx].inner_item.start_time = start_time;
	tbl->items[item_idx].inner_item.next_pkt_idx = INVALID_ARRAY_INDEX;
	tbl->items[item_idx].inner_item.sent_seq = sent_seq;
	tbl->items[item_idx].inner_item.ip_id = ip_id;
	tbl->items[item_idx].inner_item.nb_merged = 1;
	tbl->items[item_idx].inner_item.is_atomic = is_atomic;
	tbl->items[item_idx].outer_ip_id = outer_ip_id;
	tbl->items[item_idx].outer_is_atomic = outer_is_atomic;
	tbl->item_num++;

	/* If the previous packet exists, chain the new one with it. */
	if (prev_idx != INVALID_ARRAY_INDEX) {
		tbl->items[item_idx].inner_item.next_pkt_idx =
			tbl->items[prev_idx].inner_item.next_pkt_idx;
		tbl->items[prev_idx].inner_item.next_pkt_idx = item_idx;
	}

	return item_idx;
}

static inline uint32_t
delete_item(struct gro_vxlan_tcp4_tbl *tbl,
		uint32_t item_idx,
		uint32_t prev_item_idx)
{
	uint32_t next_idx = tbl->items[item_idx].inner_item.next_pkt_idx;

	/* NULL indicates an empty item. */
	tbl->items[item_idx].inner_item.firstseg = NULL;
	tbl->item_num--;
	if (prev_item_idx != INVALID_ARRAY_INDEX)
		tbl->items[prev_item_idx].inner_item.next_pkt_idx = next_idx;

	return next_idx;
}

static inline uint32_t
insert_new_flow(struct gro_vxlan_tcp4_tbl *tbl,
		struct vxlan_tcp4_flow_key *src,
		uint32_t item_idx)
{
	struct vxlan_tcp4_flow_key *dst;
	uint32_t flow_idx;

	flow_idx = find_an_empty_flow(tbl);
	if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	dst = &(tbl->flows[flow_idx].key);

	rte_ether_addr_copy(&(src->inner_key.eth_saddr),
			&(dst->inner_key.eth_saddr));
	rte_ether_addr_copy(&(src->inner_key.eth_daddr),
			&(dst->inner_key.eth_daddr));
	dst->inner_key.ip_src_addr = src->inner_key.ip_src_addr;
	dst->inner_key.ip_dst_addr = src->inner_key.ip_dst_addr;
	dst->inner_key.recv_ack = src->inner_key.recv_ack;
	dst->inner_key.src_port = src->inner_key.src_port;
	dst->inner_key.dst_port = src->inner_key.dst_port;

	dst->vxlan_hdr.vx_flags = src->vxlan_hdr.vx_flags;
	dst->vxlan_hdr.vx_vni = src->vxlan_hdr.vx_vni;
	rte_ether_addr_copy(&(src->outer_eth_saddr), &(dst->outer_eth_saddr));
	rte_ether_addr_copy(&(src->outer_eth_daddr), &(dst->outer_eth_daddr));
	dst->outer_ip_src_addr = src->outer_ip_src_addr;
	dst->outer_ip_dst_addr = src->outer_ip_dst_addr;
	dst->outer_src_port = src->outer_src_port;
	dst->outer_dst_port = src->outer_dst_port;

	tbl->flows[flow_idx].start_index = item_idx;
	tbl->flow_num++;

	return flow_idx;
}

/* Return 1 if the two flow keys match in both the outer and inner headers,
 * and 0 otherwise.
 */
static inline int
is_same_vxlan_tcp4_flow(struct vxlan_tcp4_flow_key k1,
		struct vxlan_tcp4_flow_key k2)
{
	return (rte_is_same_ether_addr(&k1.outer_eth_saddr,
					&k2.outer_eth_saddr) &&
			rte_is_same_ether_addr(&k1.outer_eth_daddr,
				&k2.outer_eth_daddr) &&
			(k1.outer_ip_src_addr == k2.outer_ip_src_addr) &&
			(k1.outer_ip_dst_addr == k2.outer_ip_dst_addr) &&
			(k1.outer_src_port == k2.outer_src_port) &&
			(k1.outer_dst_port == k2.outer_dst_port) &&
			(k1.vxlan_hdr.vx_flags == k2.vxlan_hdr.vx_flags) &&
			(k1.vxlan_hdr.vx_vni == k2.vxlan_hdr.vx_vni) &&
			is_same_tcp4_flow(k1.inner_key, k2.inner_key));
}

/*
 * Check whether the incoming packet is a neighbor of the stored packet: it
 * must be contiguous in the inner TCP sequence space and, unless the outer
 * IPv4 ID is ignored (DF bit set), contiguous in the outer IPv4 ID space as
 * well. Return 1 to append the packet, -1 to prepend it, and 0 if the two
 * packets can't be merged.
 */
static inline int
check_vxlan_seq_option(struct gro_vxlan_tcp4_item *item,
		struct rte_tcp_hdr *tcp_hdr,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id,
		uint16_t tcp_hl,
		uint16_t tcp_dl,
		uint8_t outer_is_atomic,
		uint8_t is_atomic)
{
	struct rte_mbuf *pkt = item->inner_item.firstseg;
	int cmp;
	uint16_t l2_offset;

	/* Don't merge packets whose outer DF bits are different. */
	if (unlikely(item->outer_is_atomic ^ outer_is_atomic))
		return 0;

	l2_offset = pkt->outer_l2_len + pkt->outer_l3_len;
	cmp = check_seq_option(&item->inner_item, tcp_hdr, sent_seq, ip_id,
			tcp_hl, tcp_dl, l2_offset, is_atomic);
	if ((cmp > 0) && (outer_is_atomic ||
				(outer_ip_id == item->outer_ip_id + 1)))
		/* Append the new packet. */
		return 1;
	else if ((cmp < 0) && (outer_is_atomic ||
				(outer_ip_id + item->inner_item.nb_merged ==
				 item->outer_ip_id)))
		/* Prepend the new packet. */
		return -1;

	return 0;
}

static inline int
merge_two_vxlan_tcp4_packets(struct gro_vxlan_tcp4_item *item,
		struct rte_mbuf *pkt,
		int cmp,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id)
{
	if (merge_two_tcp4_packets(&item->inner_item, pkt, cmp, sent_seq,
				ip_id, pkt->outer_l2_len +
				pkt->outer_l3_len)) {
		/* Update the outer IPv4 ID to the larger value. */
		item->outer_ip_id = cmp > 0 ? outer_ip_id : item->outer_ip_id;
		return 1;
	}

	return 0;
}

/*
 * Update the length fields of the outer IPv4, outer UDP and inner IPv4
 * headers of a reassembled packet.
 */
static inline void
update_vxlan_header(struct gro_vxlan_tcp4_item *item)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_udp_hdr *udp_hdr;
	struct rte_mbuf *pkt = item->inner_item.firstseg;
	uint16_t len;

	/* Update the outer IPv4 header. */
	len = pkt->pkt_len - pkt->outer_l2_len;
	ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			pkt->outer_l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(len);

	/* Update the outer UDP header. */
	len -= pkt->outer_l3_len;
	udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + pkt->outer_l3_len);
	udp_hdr->dgram_len = rte_cpu_to_be_16(len);

	/* Update the inner IPv4 header. */
	len -= pkt->l2_len;
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(len);
}

/*
 * Try to merge the incoming packet into the reassembly table. Return 1 if
 * the packet is merged into an existing packet, 0 if it is inserted as a new
 * item, and -1 if the packet can't be processed by GRO.
 */
int32_t
gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,
		struct gro_vxlan_tcp4_tbl *tbl,
		uint64_t start_time)
{
	struct rte_ether_hdr *outer_eth_hdr, *eth_hdr;
	struct rte_ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	struct rte_udp_hdr *udp_hdr;
	struct rte_vxlan_hdr *vxlan_hdr;
	uint32_t sent_seq;
	int32_t tcp_dl;
	uint16_t frag_off, outer_ip_id, ip_id;
	uint8_t outer_is_atomic, is_atomic;

	struct vxlan_tcp4_flow_key key;
	uint32_t cur_idx, prev_idx, item_idx;
	uint32_t i, max_flow_num, remaining_flow_num;
	int cmp;
	uint16_t hdr_len;
	uint8_t find;

	/*
	 * Don't process packets whose TCP header length is greater than
	 * 60 bytes or less than 20 bytes.
	 */
	if (unlikely(INVALID_TCP_HDRLEN(pkt->l4_len)))
		return -1;

	outer_eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	outer_ipv4_hdr = (struct rte_ipv4_hdr *)((char *)outer_eth_hdr +
			pkt->outer_l2_len);
	udp_hdr = (struct rte_udp_hdr *)((char *)outer_ipv4_hdr +
			pkt->outer_l3_len);
	vxlan_hdr = (struct rte_vxlan_hdr *)((char *)udp_hdr +
			sizeof(struct rte_udp_hdr));
	eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_hdr +
			sizeof(struct rte_vxlan_hdr));
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
	tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);

	/*
	 * Don't process packets which have FIN, SYN, RST, PSH, URG,
	 * ECE or CWR set.
	 */
	if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
		return -1;

	hdr_len = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len +
		pkt->l3_len + pkt->l4_len;
	/*
	 * Don't process packets whose payload length is less than or
	 * equal to 0.
	 */
	tcp_dl = pkt->pkt_len - hdr_len;
	if (tcp_dl <= 0)
		return -1;

	/*
	 * Save the IPv4 ID for packets whose DF bit is 0. For packets
	 * whose DF bit is 1, the IPv4 ID is ignored.
	 */
	frag_off = rte_be_to_cpu_16(outer_ipv4_hdr->fragment_offset);
	outer_is_atomic =
		(frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;
	outer_ip_id = outer_is_atomic ? 0 :
		rte_be_to_cpu_16(outer_ipv4_hdr->packet_id);
	frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
	is_atomic = (frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;
	ip_id = is_atomic ? 0 : rte_be_to_cpu_16(ipv4_hdr->packet_id);

	sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);

	rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.inner_key.eth_saddr));
	rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.inner_key.eth_daddr));
	key.inner_key.ip_src_addr = ipv4_hdr->src_addr;
	key.inner_key.ip_dst_addr = ipv4_hdr->dst_addr;
	key.inner_key.recv_ack = tcp_hdr->recv_ack;
	key.inner_key.src_port = tcp_hdr->src_port;
	key.inner_key.dst_port = tcp_hdr->dst_port;

	key.vxlan_hdr.vx_flags = vxlan_hdr->vx_flags;
	key.vxlan_hdr.vx_vni = vxlan_hdr->vx_vni;
	rte_ether_addr_copy(&(outer_eth_hdr->src_addr), &(key.outer_eth_saddr));
	rte_ether_addr_copy(&(outer_eth_hdr->dst_addr), &(key.outer_eth_daddr));
	key.outer_ip_src_addr = outer_ipv4_hdr->src_addr;
	key.outer_ip_dst_addr = outer_ipv4_hdr->dst_addr;
	key.outer_src_port = udp_hdr->src_port;
	key.outer_dst_port = udp_hdr->dst_port;

	/* Search for a matching flow. */
	max_flow_num = tbl->max_flow_num;
	remaining_flow_num = tbl->flow_num;
	find = 0;
	for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
			if (is_same_vxlan_tcp4_flow(tbl->flows[i].key, key)) {
				find = 1;
				break;
			}
			remaining_flow_num--;
		}
	}

	/*
	 * No matching flow was found. Insert a new flow and store the
	 * packet in it.
	 */
	if (find == 0) {
		item_idx = insert_new_item(tbl, pkt, start_time,
				INVALID_ARRAY_INDEX, sent_seq, outer_ip_id,
				ip_id, outer_is_atomic, is_atomic);
		if (item_idx == INVALID_ARRAY_INDEX)
			return -1;
		if (insert_new_flow(tbl, &key, item_idx) ==
				INVALID_ARRAY_INDEX) {
			/*
			 * Failed to insert a new flow, so delete the
			 * inserted packet.
			 */
			delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);
			return -1;
		}
		return 0;
	}

	/* Check all packets in the flow and try to find a neighbor. */
	cur_idx = tbl->flows[i].start_index;
	prev_idx = cur_idx;
	do {
		cmp = check_vxlan_seq_option(&(tbl->items[cur_idx]), tcp_hdr,
				sent_seq, outer_ip_id, ip_id, pkt->l4_len,
				tcp_dl, outer_is_atomic, is_atomic);
		if (cmp) {
			if (merge_two_vxlan_tcp4_packets(&(tbl->items[cur_idx]),
						pkt, cmp, sent_seq,
						outer_ip_id, ip_id))
				return 1;
			/*
			 * Can't merge the two packets, as the merged packet
			 * length would exceed the maximum value. Insert the
			 * packet into the flow.
			 */
			if (insert_new_item(tbl, pkt, start_time, prev_idx,
						sent_seq, outer_ip_id,
						ip_id, outer_is_atomic,
						is_atomic) ==
					INVALID_ARRAY_INDEX)
				return -1;
			return 0;
		}
		prev_idx = cur_idx;
		cur_idx = tbl->items[cur_idx].inner_item.next_pkt_idx;
	} while (cur_idx != INVALID_ARRAY_INDEX);

	/* No neighbor was found. Insert the packet into the flow. */
	if (insert_new_item(tbl, pkt, start_time, prev_idx, sent_seq,
				outer_ip_id, ip_id, outer_is_atomic,
				is_atomic) == INVALID_ARRAY_INDEX)
		return -1;

	return 0;
}

/*
 * Flush up to nb_out packets whose start time is not later than
 * flush_timestamp into the out array, and return the number of packets
 * flushed.
 */
uint16_t
gro_vxlan_tcp4_tbl_timeout_flush(struct gro_vxlan_tcp4_tbl *tbl,
		uint64_t flush_timestamp,
		struct rte_mbuf **out,
		uint16_t nb_out)
{
	uint16_t k = 0;
	uint32_t i, j;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++) {
		if (unlikely(tbl->flow_num == 0))
			return k;

		j = tbl->flows[i].start_index;
		while (j != INVALID_ARRAY_INDEX) {
			if (tbl->items[j].inner_item.start_time <=
					flush_timestamp) {
				out[k++] = tbl->items[j].inner_item.firstseg;
				if (tbl->items[j].inner_item.nb_merged > 1)
					update_vxlan_header(&(tbl->items[j]));
				/*
				 * Delete the item and get the next packet
				 * index.
				 */
				j = delete_item(tbl, j, INVALID_ARRAY_INDEX);
				tbl->flows[i].start_index = j;
				if (j == INVALID_ARRAY_INDEX)
					tbl->flow_num--;

				if (unlikely(k == nb_out))
					return k;
			} else
				/*
				 * The remaining packets in this flow haven't
				 * timed out. Go on to check other flows.
				 */
				break;
		}
	}
	return k;
}

uint32_t
gro_vxlan_tcp4_tbl_pkt_count(void *tbl)
{
	struct gro_vxlan_tcp4_tbl *gro_tbl = tbl;

	if (gro_tbl)
		return gro_tbl->item_num;

	return 0;
}