/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Inspur Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#include "gro_udp4.h"

void *
gro_udp4_tbl_create(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow)
{
	struct gro_udp4_tbl *tbl;
	size_t size;
	uint32_t entries_num, i;

	entries_num = max_flow_num * max_item_per_flow;
	entries_num = RTE_MIN(entries_num, GRO_UDP4_TBL_MAX_ITEM_NUM);

	if (entries_num == 0)
		return NULL;

	tbl = rte_zmalloc_socket(__func__,
			sizeof(struct gro_udp4_tbl),
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl == NULL)
		return NULL;

	size = sizeof(struct gro_udp4_item) * entries_num;
	tbl->items = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->items == NULL) {
		rte_free(tbl);
		return NULL;
	}
	tbl->max_item_num = entries_num;

	size = sizeof(struct gro_udp4_flow) * entries_num;
	tbl->flows = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->flows == NULL) {
		rte_free(tbl->items);
		rte_free(tbl);
		return NULL;
	}
	/* INVALID_ARRAY_INDEX indicates an empty flow */
	for (i = 0; i < entries_num; i++)
		tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
	tbl->max_flow_num = entries_num;

	return tbl;
}

void
gro_udp4_tbl_destroy(void *tbl)
{
	struct gro_udp4_tbl *udp_tbl = tbl;

	if (udp_tbl) {
		rte_free(udp_tbl->items);
		rte_free(udp_tbl->flows);
	}
	rte_free(udp_tbl);
}

static inline uint32_t
find_an_empty_item(struct gro_udp4_tbl *tbl)
{
	uint32_t i;
	uint32_t max_item_num = tbl->max_item_num;

	for (i = 0; i < max_item_num; i++)
		if (tbl->items[i].firstseg == NULL)
			return i;
	return INVALID_ARRAY_INDEX;
}

static inline uint32_t
find_an_empty_flow(struct gro_udp4_tbl *tbl)
{
	uint32_t i;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++)
		if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
			return i;
	return INVALID_ARRAY_INDEX;
}

static inline uint32_t
insert_new_item(struct gro_udp4_tbl *tbl,
		struct rte_mbuf *pkt,
		uint64_t start_time,
		uint32_t prev_idx,
		uint16_t frag_offset,
		uint8_t is_last_frag)
{
	uint32_t item_idx;

	item_idx = find_an_empty_item(tbl);
	if (unlikely(item_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	tbl->items[item_idx].firstseg = pkt;
	tbl->items[item_idx].lastseg = rte_pktmbuf_lastseg(pkt);
	tbl->items[item_idx].start_time = start_time;
	tbl->items[item_idx].next_pkt_idx = INVALID_ARRAY_INDEX;
	tbl->items[item_idx].frag_offset = frag_offset;
	tbl->items[item_idx].is_last_frag = is_last_frag;
	tbl->items[item_idx].nb_merged = 1;
	tbl->item_num++;

	/* if the previous packet exists, chain them together. */
	if (prev_idx != INVALID_ARRAY_INDEX) {
		tbl->items[item_idx].next_pkt_idx =
			tbl->items[prev_idx].next_pkt_idx;
		tbl->items[prev_idx].next_pkt_idx = item_idx;
	}

	return item_idx;
}

static inline uint32_t
delete_item(struct gro_udp4_tbl *tbl, uint32_t item_idx,
		uint32_t prev_item_idx)
{
	uint32_t next_idx = tbl->items[item_idx].next_pkt_idx;

	/* NULL indicates an empty item */
	tbl->items[item_idx].firstseg = NULL;
	tbl->item_num--;
	if (prev_item_idx != INVALID_ARRAY_INDEX)
		tbl->items[prev_item_idx].next_pkt_idx = next_idx;

	return next_idx;
}

static inline uint32_t
insert_new_flow(struct gro_udp4_tbl *tbl,
		struct udp4_flow_key *src,
		uint32_t item_idx)
{
	struct udp4_flow_key *dst;
	uint32_t flow_idx;

	flow_idx = find_an_empty_flow(tbl);
	if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	dst = &(tbl->flows[flow_idx].key);

	rte_ether_addr_copy(&(src->eth_saddr), &(dst->eth_saddr));
	rte_ether_addr_copy(&(src->eth_daddr), &(dst->eth_daddr));
	dst->ip_src_addr = src->ip_src_addr;
	dst->ip_dst_addr = src->ip_dst_addr;
	dst->ip_id = src->ip_id;

	tbl->flows[flow_idx].start_index = item_idx;
	tbl->flow_num++;

	return flow_idx;
}

/*
 * Update the IPv4 total length of the flushed packet and clear the MF
 * bit if the last fragment has been merged.
 */
static inline void
update_header(struct gro_udp4_item *item)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_mbuf *pkt = item->firstseg;
	uint16_t frag_offset;

	ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
			pkt->l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -
			pkt->l2_len);

	/* Clear MF bit if it is last fragment */
	if (item->is_last_frag) {
		frag_offset = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
		ipv4_hdr->fragment_offset =
			rte_cpu_to_be_16(frag_offset & ~RTE_IPV4_HDR_MF_FLAG);
	}
}

int32_t
gro_udp4_reassemble(struct rte_mbuf *pkt,
		struct gro_udp4_tbl *tbl,
		uint64_t start_time)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	uint16_t ip_dl;
	uint16_t ip_id, hdr_len;
	uint16_t frag_offset = 0;
	uint8_t is_last_frag;

	struct udp4_flow_key key;
	uint32_t cur_idx, prev_idx, item_idx;
	uint32_t i, max_flow_num, remaining_flow_num;
	int cmp;
	uint8_t find;

	eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
	hdr_len = pkt->l2_len + pkt->l3_len;

	/*
	 * Don't process non-fragmented packets.
	 */
	if (!is_ipv4_fragment(ipv4_hdr))
		return -1;

	ip_dl = rte_be_to_cpu_16(ipv4_hdr->total_length);
	/* Trim the tail padding bytes */
	if (pkt->pkt_len > (uint32_t)(ip_dl + pkt->l2_len))
		rte_pktmbuf_trim(pkt, pkt->pkt_len - ip_dl - pkt->l2_len);

	/*
	 * Don't process packets whose payload length is less than or
	 * equal to 0.
	 */
	if (pkt->pkt_len <= hdr_len)
		return -1;

	if (ip_dl <= pkt->l3_len)
		return -1;

	ip_dl -= pkt->l3_len;
	ip_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
	frag_offset = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
	is_last_frag = ((frag_offset & RTE_IPV4_HDR_MF_FLAG) == 0) ? 1 : 0;
	frag_offset = (uint16_t)(frag_offset & RTE_IPV4_HDR_OFFSET_MASK) << 3;

	rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.eth_saddr));
	rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.eth_daddr));
	key.ip_src_addr = ipv4_hdr->src_addr;
	key.ip_dst_addr = ipv4_hdr->dst_addr;
	key.ip_id = ip_id;

	/* Search for a matched flow. */
	max_flow_num = tbl->max_flow_num;
	remaining_flow_num = tbl->flow_num;
	find = 0;
	for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
			if (is_same_udp4_flow(tbl->flows[i].key, key)) {
				find = 1;
				break;
			}
			remaining_flow_num--;
		}
	}

	/*
	 * Fail to find a matched flow. Insert a new flow and store the
	 * packet into the flow.
	 */
	if (find == 0) {
		item_idx = insert_new_item(tbl, pkt, start_time,
				INVALID_ARRAY_INDEX, frag_offset,
				is_last_frag);
		if (unlikely(item_idx == INVALID_ARRAY_INDEX))
			return -1;
		if (insert_new_flow(tbl, &key, item_idx) ==
				INVALID_ARRAY_INDEX) {
			/*
			 * Fail to insert a new flow, so delete the
			 * stored packet.
			 */
			delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);
			return -1;
		}
		return 0;
	}

	/*
	 * Check all packets in the flow and try to find a neighbor for
	 * the input packet.
	 */
	cur_idx = tbl->flows[i].start_index;
	prev_idx = cur_idx;
	do {
		cmp = udp4_check_neighbor(&(tbl->items[cur_idx]),
				frag_offset, ip_dl, 0);
		if (cmp) {
			if (merge_two_udp4_packets(&(tbl->items[cur_idx]),
						pkt, cmp, frag_offset,
						is_last_frag, 0))
				return 1;
			/*
			 * Fail to merge the two packets, as the packet
			 * length is greater than the max value. Store
			 * the packet into the flow.
			 */
			if (insert_new_item(tbl, pkt, start_time, prev_idx,
						frag_offset, is_last_frag) ==
					INVALID_ARRAY_INDEX)
				return -1;
			return 0;
		}

		/* Ensure inserted items are ordered by frag_offset */
		if (frag_offset < tbl->items[cur_idx].frag_offset)
			break;

		prev_idx = cur_idx;
		cur_idx = tbl->items[cur_idx].next_pkt_idx;
	} while (cur_idx != INVALID_ARRAY_INDEX);

	/* Fail to find a neighbor, so store the packet into the flow. */
	if (cur_idx == tbl->flows[i].start_index) {
		/* Insert it before the first packet of the flow */
		item_idx = insert_new_item(tbl, pkt, start_time,
				INVALID_ARRAY_INDEX, frag_offset,
				is_last_frag);
		if (unlikely(item_idx == INVALID_ARRAY_INDEX))
			return -1;
		tbl->items[item_idx].next_pkt_idx = cur_idx;
		tbl->flows[i].start_index = item_idx;
	} else {
		if (insert_new_item(tbl, pkt, start_time, prev_idx,
					frag_offset, is_last_frag) ==
				INVALID_ARRAY_INDEX)
			return -1;
	}

	return 0;
}

/*
 * Merge the items that directly follow start_idx in the flow, as long as
 * each one is a neighbor of the growing packet.
 */
static int
gro_udp4_merge_items(struct gro_udp4_tbl *tbl,
		uint32_t start_idx)
{
	uint16_t frag_offset;
	uint8_t is_last_frag;
	int16_t ip_dl;
	struct rte_mbuf *pkt;
	int cmp;
	uint32_t item_idx;
	uint16_t hdr_len;

	item_idx = tbl->items[start_idx].next_pkt_idx;
	while (item_idx != INVALID_ARRAY_INDEX) {
		pkt = tbl->items[item_idx].firstseg;
		hdr_len = pkt->l2_len + pkt->l3_len;
		ip_dl = pkt->pkt_len - hdr_len;
		frag_offset = tbl->items[item_idx].frag_offset;
		is_last_frag = tbl->items[item_idx].is_last_frag;
		cmp = udp4_check_neighbor(&(tbl->items[start_idx]),
				frag_offset, ip_dl, 0);
		if (cmp) {
			if (merge_two_udp4_packets(&(tbl->items[start_idx]),
					pkt, cmp, frag_offset,
					is_last_frag, 0)) {
				item_idx = delete_item(tbl, item_idx,
						INVALID_ARRAY_INDEX);
				tbl->items[start_idx].next_pkt_idx = item_idx;
			} else
				return 0;
		} else
			return 0;
	}

	return 0;
}

uint16_t
gro_udp4_tbl_timeout_flush(struct gro_udp4_tbl *tbl,
		uint64_t flush_timestamp,
		struct rte_mbuf **out,
		uint16_t nb_out)
{
	uint16_t k = 0;
	uint32_t i, j;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++) {
		if (unlikely(tbl->flow_num == 0))
			return k;

		j = tbl->flows[i].start_index;
		while (j != INVALID_ARRAY_INDEX) {
			if (tbl->items[j].start_time <= flush_timestamp) {
				gro_udp4_merge_items(tbl, j);
				out[k++] = tbl->items[j].firstseg;
				if (tbl->items[j].nb_merged > 1)
					update_header(&(tbl->items[j]));
				/*
				 * Delete the packet and get the next
				 * packet in the flow.
				 */
				j = delete_item(tbl, j, INVALID_ARRAY_INDEX);
				tbl->flows[i].start_index = j;
				if (j == INVALID_ARRAY_INDEX)
					tbl->flow_num--;

				if (unlikely(k == nb_out))
					return k;
			} else
				/*
				 * Flushing does not strictly follow the
				 * timestamp order. Once an item whose
				 * start_time is greater than
				 * flush_timestamp is found, the remaining
				 * packets of this flow are left for a
				 * later flush, so go on to check other
				 * flows.
				 */
				break;
		}
	}
	return k;
}

uint32_t
gro_udp4_tbl_pkt_count(void *tbl)
{
	struct gro_udp4_tbl *gro_tbl = tbl;

	if (gro_tbl)
		return gro_tbl->item_num;

	return 0;
}
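/*
 * Usage sketch (illustrative only, not part of this file): these entry
 * points are normally driven through the generic rte_gro_* API rather than
 * called directly, but the lifecycle of the table looks roughly as below.
 * The flow/item sizes, the "timeout_cycles" value and the "out" array size
 * are hypothetical choices, not values defined by this library.
 *
 *	struct rte_mbuf *out[32];
 *	uint64_t timeout_cycles = rte_get_tsc_hz() / 100;
 *	void *tbl = gro_udp4_tbl_create(rte_socket_id(), 4, 8);
 *
 *	// For each received IPv4 fragment: 1 = merged, 0 = stored in the
 *	// table, -1 = not processed (the caller keeps the mbuf).
 *	ret = gro_udp4_reassemble(pkt, tbl, rte_rdtsc());
 *
 *	// Periodically flush packets older than the timeout.
 *	nb = gro_udp4_tbl_timeout_flush(tbl,
 *			rte_rdtsc() - timeout_cycles, out, RTE_DIM(out));
 *
 *	gro_udp4_tbl_destroy(tbl);
 */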