/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>

#include <rte_string_fns.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "rte_reorder.h"

TAILQ_HEAD(rte_reorder_list, rte_tailq_entry);

static struct rte_tailq_elem rte_reorder_tailq = {
	.name = "RTE_REORDER",
};
EAL_REGISTER_TAILQ(rte_reorder_tailq)

#define NO_FLAGS 0
#define RTE_REORDER_PREFIX "RO_"
#define RTE_REORDER_NAMESIZE 32

/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_REORDER RTE_LOGTYPE_USER1

#define RTE_REORDER_SEQN_DYNFIELD_NAME "rte_reorder_seqn_dynfield"
int rte_reorder_seqn_dynfield_offset = -1;

/* A generic circular buffer */
struct cir_buffer {
	unsigned int size;   /**< Number of entries that can be stored */
	unsigned int mask;   /**< [buffer_size - 1]: used for wrap-around */
	unsigned int head;   /**< insertion point in buffer */
	unsigned int tail;   /**< extraction point in buffer */
	struct rte_mbuf **entries;
} __rte_cache_aligned;

/* The reorder buffer data structure itself */
struct rte_reorder_buffer {
	char name[RTE_REORDER_NAMESIZE];
	uint32_t min_seqn;  /**< Lowest seq. number that can be in the buffer */
	unsigned int memsize; /**< memory area size of reorder buffer */
	struct cir_buffer ready_buf; /**< temp buffer for dequeued entries */
	struct cir_buffer order_buf; /**< buffer used to reorder entries */
	int is_initialized;
} __rte_cache_aligned;

static void
rte_reorder_free_mbufs(struct rte_reorder_buffer *b);

struct rte_reorder_buffer *
rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize,
		const char *name, unsigned int size)
{
	const unsigned int min_bufsize = sizeof(*b) +
					(2 * size * sizeof(struct rte_mbuf *));

	if (b == NULL) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer parameter:"
					" NULL\n");
		rte_errno = EINVAL;
		return NULL;
	}
	if (!rte_is_power_of_2(size)) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer size"
				" - Not a power of 2\n");
		rte_errno = EINVAL;
		return NULL;
	}
	if (name == NULL) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer name ptr:"
					" NULL\n");
		rte_errno = EINVAL;
		return NULL;
	}
	if (bufsize < min_bufsize) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer memory size: %u, "
			"minimum required: %u\n", bufsize, min_bufsize);
		rte_errno = EINVAL;
		return NULL;
	}

	memset(b, 0, bufsize);
	strlcpy(b->name, name, sizeof(b->name));
	b->memsize = bufsize;
	b->order_buf.size = b->ready_buf.size = size;
	b->order_buf.mask = b->ready_buf.mask = size - 1;
	b->ready_buf.entries = (void *)&b[1];
	b->order_buf.entries = RTE_PTR_ADD(&b[1],
			size * sizeof(b->ready_buf.entries[0]));

	return b;
}
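/*
 * Usage sketch for rte_reorder_init() (illustrative comment only;
 * EXAMPLE_SIZE and handle_error() are hypothetical, not part of this
 * library). The caller hands over a single memory area sized for the
 * structure plus the two entry arrays laid out directly behind it, which
 * is exactly the min_bufsize check above:
 *
 *	#define EXAMPLE_SIZE 1024	// must be a power of two
 *	unsigned int bufsize = sizeof(struct rte_reorder_buffer) +
 *			2 * EXAMPLE_SIZE * sizeof(struct rte_mbuf *);
 *	struct rte_reorder_buffer *b = malloc(bufsize);
 *	if (b == NULL ||
 *			rte_reorder_init(b, bufsize, "RO_static", EXAMPLE_SIZE) == NULL)
 *		handle_error();	// rte_errno is set to EINVAL on bad arguments
 */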
struct rte_reorder_buffer*
rte_reorder_create(const char *name, unsigned socket_id, unsigned int size)
{
	struct rte_reorder_buffer *b = NULL;
	struct rte_tailq_entry *te;
	struct rte_reorder_list *reorder_list;
	const unsigned int bufsize = sizeof(struct rte_reorder_buffer) +
					(2 * size * sizeof(struct rte_mbuf *));
	static const struct rte_mbuf_dynfield reorder_seqn_dynfield_desc = {
		.name = RTE_REORDER_SEQN_DYNFIELD_NAME,
		.size = sizeof(rte_reorder_seqn_t),
		.align = __alignof__(rte_reorder_seqn_t),
	};

	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

	/* Check user arguments. */
	if (!rte_is_power_of_2(size)) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer size"
				" - Not a power of 2\n");
		rte_errno = EINVAL;
		return NULL;
	}
	if (name == NULL) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer name ptr:"
					" NULL\n");
		rte_errno = EINVAL;
		return NULL;
	}

	rte_reorder_seqn_dynfield_offset =
		rte_mbuf_dynfield_register(&reorder_seqn_dynfield_desc);
	if (rte_reorder_seqn_dynfield_offset < 0) {
		RTE_LOG(ERR, REORDER, "Failed to register mbuf field for reorder sequence number\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_mcfg_tailq_write_lock();

	/* guarantee there's no existing buffer with the same name */
	TAILQ_FOREACH(te, reorder_list, next) {
		b = (struct rte_reorder_buffer *) te->data;
		if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0)
			break;
	}
	if (te != NULL)
		goto exit;

	/* allocate tailq entry */
	te = rte_zmalloc("REORDER_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, REORDER, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		b = NULL;
		goto exit;
	}

	/* Allocate memory to store the reorder buffer structure. */
	b = rte_zmalloc_socket("REORDER_BUFFER", bufsize, 0, socket_id);
	if (b == NULL) {
		RTE_LOG(ERR, REORDER, "Reorder buffer allocation failed\n");
		rte_errno = ENOMEM;
		rte_free(te);
	} else {
		rte_reorder_init(b, bufsize, name, size);
		te->data = (void *)b;
		TAILQ_INSERT_TAIL(reorder_list, te, next);
	}

exit:
	rte_mcfg_tailq_write_unlock();
	return b;
}

void
rte_reorder_reset(struct rte_reorder_buffer *b)
{
	char name[RTE_REORDER_NAMESIZE];

	rte_reorder_free_mbufs(b);
	strlcpy(name, b->name, sizeof(name));
	/* No error checking as current values should be valid */
	rte_reorder_init(b, b->memsize, name, b->order_buf.size);
}

static void
rte_reorder_free_mbufs(struct rte_reorder_buffer *b)
{
	unsigned i;

	/* Free up the mbufs of order buffer & ready buffer */
	for (i = 0; i < b->order_buf.size; i++) {
		rte_pktmbuf_free(b->order_buf.entries[i]);
		rte_pktmbuf_free(b->ready_buf.entries[i]);
	}
}
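/*
 * Lifecycle sketch (illustrative comment only; names are hypothetical):
 * rte_reorder_create() registers the sequence-number dynfield, allocates
 * the buffer on the requested NUMA socket and links it into the shared
 * tailq, so any lcore can later retrieve it by name with
 * rte_reorder_find_existing():
 *
 *	struct rte_reorder_buffer *b =
 *			rte_reorder_create("RO_flow0", rte_socket_id(), 1024);
 *	if (b == NULL)
 *		rte_exit(EXIT_FAILURE, "create failed: %d\n", rte_errno);
 *	...
 *	rte_reorder_free(b);	// unlinks the tailq entry, frees mbufs and memory
 */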
void
rte_reorder_free(struct rte_reorder_buffer *b)
{
	struct rte_reorder_list *reorder_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (b == NULL)
		return;

	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

	rte_mcfg_tailq_write_lock();

	/* find our tailq entry */
	TAILQ_FOREACH(te, reorder_list, next) {
		if (te->data == (void *) b)
			break;
	}
	if (te == NULL) {
		rte_mcfg_tailq_write_unlock();
		return;
	}

	TAILQ_REMOVE(reorder_list, te, next);

	rte_mcfg_tailq_write_unlock();

	rte_reorder_free_mbufs(b);

	rte_free(b);
	rte_free(te);
}

struct rte_reorder_buffer *
rte_reorder_find_existing(const char *name)
{
	struct rte_reorder_buffer *b = NULL;
	struct rte_tailq_entry *te;
	struct rte_reorder_list *reorder_list;

	if (name == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, reorder_list, next) {
		b = (struct rte_reorder_buffer *) te->data;
		if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return b;
}

static unsigned
rte_reorder_fill_overflow(struct rte_reorder_buffer *b, unsigned n)
{
	/*
	 * 1. Move all ready entries that fit to the ready_buf
	 * 2. check if we meet the minimum needed (n).
	 * 3. If not, then skip any gaps and keep moving.
	 * 4. If at any point the ready buffer is full, stop
	 * 5. Return the number of positions the order_buf head has moved
	 */

	struct cir_buffer *order_buf = &b->order_buf,
			*ready_buf = &b->ready_buf;

	unsigned int order_head_adv = 0;

	/*
	 * move at least n packets to ready buffer, assuming ready buffer
	 * has room for those packets.
	 */
	while (order_head_adv < n &&
			((ready_buf->head + 1) & ready_buf->mask) != ready_buf->tail) {

		/* if we are blocked waiting on a packet, skip it */
		if (order_buf->entries[order_buf->head] == NULL) {
			order_buf->head = (order_buf->head + 1) & order_buf->mask;
			order_head_adv++;
		}

		/* Move all ready entries that fit to the ready_buf */
		while (order_buf->entries[order_buf->head] != NULL) {
			ready_buf->entries[ready_buf->head] =
					order_buf->entries[order_buf->head];

			order_buf->entries[order_buf->head] = NULL;
			order_head_adv++;

			order_buf->head = (order_buf->head + 1) & order_buf->mask;

			if (((ready_buf->head + 1) & ready_buf->mask) == ready_buf->tail)
				break;

			ready_buf->head = (ready_buf->head + 1) & ready_buf->mask;
		}
	}

	b->min_seqn += order_head_adv;
	/* Return the number of positions the order_buf head has moved */
	return order_head_adv;
}
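/*
 * Worked example for rte_reorder_fill_overflow() (numbers are illustrative):
 * with order_buf->size = 8 and min_seqn = 100, an insert of seqn 110 gives
 * offset = 10, so the window must advance by offset + 1 - size = 3 slots.
 * The slots for seqn 100..102 are moved to ready_buf if filled, or skipped
 * if still NULL; order_head_adv becomes 3, min_seqn becomes 103, and the
 * recomputed offset 110 - 103 = 7 places the mbuf in the last slot of the
 * new window.
 */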
int
rte_reorder_insert(struct rte_reorder_buffer *b, struct rte_mbuf *mbuf)
{
	uint32_t offset, position;
	struct cir_buffer *order_buf;

	if (b == NULL || mbuf == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	order_buf = &b->order_buf;
	if (!b->is_initialized) {
		b->min_seqn = *rte_reorder_seqn(mbuf);
		b->is_initialized = 1;
	}

	/*
	 * calculate the offset from the head pointer we need to go.
	 * The subtraction takes care of the sequence number wrapping.
	 * For example (using 16-bit for brevity):
	 *	min_seqn  = 0xFFFD
	 *	mbuf_seqn = 0x0010
	 *	offset    = 0x0010 - 0xFFFD = 0x13
	 */
	offset = *rte_reorder_seqn(mbuf) - b->min_seqn;

	/*
	 * action to take depends on offset.
	 * offset < buffer->size: the mbuf fits within the current window of
	 *    sequence numbers we can reorder. EXPECTED CASE.
	 * offset >= buffer->size: the mbuf is outside the current window. There
	 *    are a number of cases to consider:
	 *    1. The packet sequence is just outside the window, then we need
	 *       to see about shifting the head pointer and taking any ready
	 *       to return packets out of the ring. If there was a delayed
	 *       or dropped packet preventing drains from shifting the window
	 *       this case will skip over the dropped packet instead, and any
	 *       packets dequeued here will be returned on the next drain call.
	 *    2. The packet sequence number is vastly outside our window, taken
	 *       here as having offset of at least twice the buffer size. In
	 *       this case, the packet is probably an old or late packet that
	 *       was previously skipped, so just enqueue the packet for
	 *       immediate return on the next drain call, or else return error.
	 */
	if (offset < b->order_buf.size) {
		position = (order_buf->head + offset) & order_buf->mask;
		order_buf->entries[position] = mbuf;
	} else if (offset < 2 * b->order_buf.size) {
		if (rte_reorder_fill_overflow(b, offset + 1 - order_buf->size)
				< (offset + 1 - order_buf->size)) {
			/* Put in handling for enqueue straight to output */
			rte_errno = ENOSPC;
			return -1;
		}
		offset = *rte_reorder_seqn(mbuf) - b->min_seqn;
		position = (order_buf->head + offset) & order_buf->mask;
		order_buf->entries[position] = mbuf;
	} else {
		/* Put in handling for enqueue straight to output */
		rte_errno = ERANGE;
		return -1;
	}
	return 0;
}

unsigned int
rte_reorder_drain(struct rte_reorder_buffer *b, struct rte_mbuf **mbufs,
		unsigned max_mbufs)
{
	unsigned int drain_cnt = 0;

	struct cir_buffer *order_buf = &b->order_buf,
			*ready_buf = &b->ready_buf;

	/* Try to fetch requested number of mbufs from ready buffer */
	while ((drain_cnt < max_mbufs) && (ready_buf->tail != ready_buf->head)) {
		mbufs[drain_cnt++] = ready_buf->entries[ready_buf->tail];
		ready_buf->entries[ready_buf->tail] = NULL;
		ready_buf->tail = (ready_buf->tail + 1) & ready_buf->mask;
	}

	/*
	 * If requested number of buffers not fetched from ready buffer, fetch
	 * remaining buffers from order buffer
	 */
	while ((drain_cnt < max_mbufs) &&
			(order_buf->entries[order_buf->head] != NULL)) {
		mbufs[drain_cnt++] = order_buf->entries[order_buf->head];
		order_buf->entries[order_buf->head] = NULL;
		b->min_seqn++;
		order_buf->head = (order_buf->head + 1) & order_buf->mask;
	}

	return drain_cnt;
}
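/*
 * Datapath sketch for insert/drain (illustrative comment only; seqn, m,
 * tx_burst and BURST_SIZE are hypothetical): a distributor core tags each
 * mbuf before handing it to the workers, and the TX core restores the
 * original order:
 *
 *	*rte_reorder_seqn(m) = seqn++;	// tag on the distributor core
 *	...
 *	if (rte_reorder_insert(b, m) < 0 && rte_errno == ERANGE)
 *		rte_pktmbuf_free(m);	// too far outside the window, drop
 *	nb = rte_reorder_drain(b, tx_burst, BURST_SIZE);
 *	// tx_burst[0..nb-1] now holds in-order packets ready to transmit
 */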
/* Binary search seqn in ready buffer */
static inline uint32_t
ready_buffer_seqn_find(const struct cir_buffer *ready_buf, const uint32_t seqn)
{
	uint32_t mid, value, position, high;
	uint32_t low = 0;

	if (ready_buf->tail > ready_buf->head)
		high = ready_buf->tail - ready_buf->head;
	else
		high = ready_buf->head - ready_buf->tail;

	while (low <= high) {
		mid = low + (high - low) / 2;
		position = (ready_buf->tail + mid) & ready_buf->mask;
		value = *rte_reorder_seqn(ready_buf->entries[position]);
		if (seqn == value)
			return mid;
		else if (seqn > value)
			low = mid + 1;
		else
			high = mid - 1;
	}

	return low;
}

unsigned int
rte_reorder_drain_up_to_seqn(struct rte_reorder_buffer *b, struct rte_mbuf **mbufs,
		const unsigned int max_mbufs, const rte_reorder_seqn_t seqn)
{
	uint32_t i, position, offset;
	unsigned int drain_cnt = 0;

	struct cir_buffer *order_buf = &b->order_buf,
			*ready_buf = &b->ready_buf;

	/* Seqn in Ready buffer */
	if (seqn < b->min_seqn) {
		/* All sequence numbers are higher than the given one */
		if ((ready_buf->tail == ready_buf->head) ||
				(*rte_reorder_seqn(ready_buf->entries[ready_buf->tail]) > seqn))
			return 0;

		offset = ready_buffer_seqn_find(ready_buf, seqn);

		for (i = 0; (i < offset) && (drain_cnt < max_mbufs); i++) {
			position = (ready_buf->tail + i) & ready_buf->mask;
			mbufs[drain_cnt++] = ready_buf->entries[position];
			ready_buf->entries[position] = NULL;
		}
		ready_buf->tail = (ready_buf->tail + i) & ready_buf->mask;

		return drain_cnt;
	}

	/* Seqn in Order buffer, add all buffers from Ready buffer */
	while ((drain_cnt < max_mbufs) && (ready_buf->tail != ready_buf->head)) {
		mbufs[drain_cnt++] = ready_buf->entries[ready_buf->tail];
		ready_buf->entries[ready_buf->tail] = NULL;
		ready_buf->tail = (ready_buf->tail + 1) & ready_buf->mask;
	}

	/* Fetch buffers from Order buffer up to given sequence number (exclusive) */
	offset = RTE_MIN(seqn - b->min_seqn, b->order_buf.size);
	for (i = 0; (i < offset) && (drain_cnt < max_mbufs); i++) {
		position = (order_buf->head + i) & order_buf->mask;
		if (order_buf->entries[position] == NULL)
			continue;
		mbufs[drain_cnt++] = order_buf->entries[position];
		order_buf->entries[position] = NULL;
	}
	b->min_seqn += i;
	order_buf->head = (order_buf->head + i) & order_buf->mask;

	return drain_cnt;
}

static bool
rte_reorder_is_empty(const struct rte_reorder_buffer *b)
{
	const struct cir_buffer *order_buf = &b->order_buf, *ready_buf = &b->ready_buf;
	unsigned int i;

	/* Ready buffer does not have gaps */
	if (ready_buf->tail != ready_buf->head)
		return false;

	/* Order buffer could have gaps, iterate */
	for (i = 0; i < order_buf->size; i++) {
		if (order_buf->entries[i] != NULL)
			return false;
	}

	return true;
}

unsigned int
rte_reorder_min_seqn_set(struct rte_reorder_buffer *b, rte_reorder_seqn_t min_seqn)
{
	if (!rte_reorder_is_empty(b))
		return -ENOTEMPTY;

	b->min_seqn = min_seqn;
	b->is_initialized = true;

	return 0;
}
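/*
 * Sketch for the sequence-window helpers (illustrative comment only;
 * first_expected_seqn, out, flush_seqn, nb and handle_error() are
 * hypothetical): rte_reorder_min_seqn_set() pre-arms an empty buffer so
 * the first insert does not define the window start, and
 * rte_reorder_drain_up_to_seqn() releases every buffered mbuf with a
 * sequence number strictly below the given one, skipping over gaps:
 *
 *	if (rte_reorder_min_seqn_set(b, first_expected_seqn) != 0)
 *		handle_error();	// buffer not empty, drain it first
 *	...
 *	nb = rte_reorder_drain_up_to_seqn(b, out, BURST_SIZE, flush_seqn);
 */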