/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_UTILS_H_
#define RTE_PMD_MLX5_UTILS_H_

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <limits.h>
#include <errno.h>

#include <rte_spinlock.h>
#include <rte_rwlock.h>
#include <rte_memory.h>
#include <rte_bitmap.h>

#include <mlx5_common.h>
#include <mlx5_common_utils.h>

#include "mlx5_defs.h"

/* Convert a bit number to the corresponding 64-bit mask. */
#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))

/* Save and restore errno around argument evaluation. */
#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))

extern int mlx5_logtype;

#define MLX5_NET_LOG_PREFIX "mlx5_net"

/* Generic printf()-like logging macro with automatic line feed. */
#define DRV_LOG(level, ...) \
	PMD_DRV_LOG_(level, mlx5_logtype, MLX5_NET_LOG_PREFIX, \
		__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
		PMD_DRV_LOG_CPAREN)

/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
#define DATA_LEN(m) ((m)->data_len)
#define PKT_LEN(m) ((m)->pkt_len)
#define DATA_OFF(m) ((m)->data_off)
#define SET_DATA_OFF(m, o) ((m)->data_off = (o))
#define NB_SEGS(m) ((m)->nb_segs)
#define PORT(m) ((m)->port)

/*
 * Transpose flags. Useful to convert IBV to DPDK flags: scales the bits
 * of @p val selected by mask @p from so that they land on the bits of
 * mask @p to (integer divide when moving to lower bits, multiply when
 * moving to higher bits).
 */
#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	(((val) & (from)) / ((from) / (to))) : \
	(((val) & (from)) * ((to) / (from))))

/*
 * For data linked with a sequentially increasing index, an array table
 * is more efficient than a hash table when one data entry has to be
 * searched among large numbers of entries. A traditional hash table has
 * a fixed size, so saving huge numbers of entries to it also brings
 * lots of hash conflicts.
 *
 * But a simple array table also has a fixed size; allocating all the
 * needed memory at once can waste lots of memory, and when the exact
 * number of entries is unknown it is impossible to size the array at
 * all.
 *
 * A multi-level table helps to balance these two disadvantages. A
 * global high-level table holding sub-table entries is allocated first;
 * a sub-table is only allocated once an index entry belonging to it
 * actually needs to be saved. E.g. for a 32-bit index, a three-level
 * table with a 10-10-12 split and a sequentially increasing index grows
 * its memory with every 4K entries.
 *
 * The current implementation introduces a 10-10-12 32-bit split
 * three-level table to help the cases which have millions of entries to
 * save. The index entries can be addressed directly by the index; no
 * search is needed.
 */

/* L3 table global (top-level) table defines. */
#define MLX5_L3T_GT_OFFSET 22
#define MLX5_L3T_GT_SIZE (1 << 10)
#define MLX5_L3T_GT_MASK (MLX5_L3T_GT_SIZE - 1)

/* L3 table middle table defines. */
#define MLX5_L3T_MT_OFFSET 12
#define MLX5_L3T_MT_SIZE (1 << 10)
#define MLX5_L3T_MT_MASK (MLX5_L3T_MT_SIZE - 1)

/* L3 table entry (leaf) table defines. */
#define MLX5_L3T_ET_OFFSET 0
#define MLX5_L3T_ET_SIZE (1 << 12)
#define MLX5_L3T_ET_MASK (MLX5_L3T_ET_SIZE - 1)

/* L3 table type: the width of the data stored in each entry. */
enum mlx5_l3t_type {
	MLX5_L3T_TYPE_WORD = 0,
	MLX5_L3T_TYPE_DWORD,
	MLX5_L3T_TYPE_QWORD,
	MLX5_L3T_TYPE_PTR,
	MLX5_L3T_TYPE_MAX,
};

struct mlx5_indexed_pool;

/* Generic data struct. Overlays the four entry widths supported by L3T. */
union mlx5_l3t_data {
	uint16_t word;
	uint32_t dword;
	uint64_t qword;
	void *ptr;
};

/* L3 level table data structure. */
struct mlx5_l3t_level_tbl {
	uint64_t ref_cnt; /* Table ref_cnt. */
	void *tbl[]; /* Table array. */
};

/* L3 word entry table data structure.
 */
struct mlx5_l3t_entry_word {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint16_t data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 double word entry table data structure. */
struct mlx5_l3t_entry_dword {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint32_t data;
		int32_t ref_cnt;
		/* NOTE(review): ref_cnt is signed here while the word,
		 * qword and ptr variants use uint32_t — confirm this is
		 * intentional.
		 */
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 quad word entry table data structure. */
struct mlx5_l3t_entry_qword {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint64_t data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 pointer entry table data structure. */
struct mlx5_l3t_entry_ptr {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		void *data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 table data structure. */
struct mlx5_l3t_tbl {
	enum mlx5_l3t_type type; /* Table type. */
	struct mlx5_indexed_pool *eip;
	/* Table index pool handles. */
	struct mlx5_l3t_level_tbl *tbl; /* Global table index. */
	rte_spinlock_t sl; /* The table lock. */
};

/**
 * Type of callback function invoked with an entry's data.
 * NOTE(review): the name suggests it is called on allocation, but the
 * original comment said "handle the data before freeing" — confirm the
 * exact semantics against the callers in mlx5_utils.c.
 */
typedef int32_t (*mlx5_l3t_alloc_callback_fn)(void *ctx,
					      union mlx5_l3t_data *data);

/*
 * The default ipool threshold value indicates which per_core_cache
 * value to set.
 */
#define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
/* The default min local cache size. */
#define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)

/*
 * The indexed memory entry index is made up of the trunk index and the
 * offset of the entry in the trunk.
Since the entry index is 32 bits, in case the user
 * prefers to have small trunks, the macro below can be changed to a big
 * number which helps the pool contain more trunks with lots of entries
 * allocated.
 */
#define TRUNK_IDX_BITS 16
#define TRUNK_MAX_IDX ((1 << TRUNK_IDX_BITS) - 1)
#define TRUNK_INVALID TRUNK_MAX_IDX
#define MLX5_IPOOL_DEFAULT_TRUNK_SIZE (1 << (28 - TRUNK_IDX_BITS))
#ifdef RTE_LIBRTE_MLX5_DEBUG
#define POOL_DEBUG 1
#endif

struct mlx5_indexed_pool_config {
	uint32_t size; /* Pool entry size. */
	uint32_t trunk_size:22;
	/*
	 * Trunk entry number. Must be a power of 2. It can be increased
	 * if grow_trunk is enabled: the trunk entry number then grows by
	 * left-shifting with grow_shift, and trunks whose index is after
	 * grow_trunk keep the same entry number as the last grown trunk.
	 */
	uint32_t grow_trunk:4;
	/*
	 * Number of trunks with a growing entry number in the pool. Set
	 * it to 0 to make the pool work as a fixed trunk-entry pool. It
	 * works only if grow_shift is not 0.
	 */
	uint32_t grow_shift:4;
	/*
	 * Shift value by which the trunk entry number increases; growth
	 * stops after grow_trunk trunks. It works only if grow_trunk is
	 * not 0.
	 */
	uint32_t need_lock:1;
	/* Lock is needed for multiple thread usage. */
	uint32_t release_mem_en:1; /* Release trunk when it is free. */
	uint32_t max_idx; /* The maximum index that can be allocated. */
	uint32_t per_core_cache;
	/*
	 * Cache entry number per core for performance. Should not be
	 * set together with release_mem_en.
	 */
	const char *type; /* Memory allocate type name. */
	void *(*malloc)(uint32_t flags, size_t size, unsigned int align,
			int socket);
	/* User defined memory allocator. */
	void (*free)(void *addr); /* User defined memory release. */
};

struct mlx5_indexed_trunk {
	uint32_t idx; /* Trunk id. */
	uint32_t prev; /* Previous free trunk in free list.
 */
	uint32_t next; /* Next free trunk in free list. */
	uint32_t free; /* Free entries available. */
	struct rte_bitmap *bmp;
	/* Entry bitmap — presumably tracks free entries in this trunk;
	 * see mlx5_utils.c for the exact semantics.
	 */
	uint8_t data[] __rte_cache_aligned; /* Entry data start. */
};

struct mlx5_indexed_cache {
	struct mlx5_indexed_trunk **trunks;
	volatile uint32_t n_trunk_valid; /* Trunks allocated. */
	uint32_t n_trunk; /* Trunk pointer array size. */
	uint32_t ref_cnt;
	uint32_t len;
	uint32_t idx[];
};

struct mlx5_ipool_per_lcore {
	struct mlx5_indexed_cache *lc;
	uint32_t len; /**< Current cache count. */
	uint32_t idx[]; /**< Cache objects. */
};

struct mlx5_indexed_pool {
	struct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */
	rte_spinlock_t rsz_lock; /* Pool lock for multiple thread usage. */
	rte_spinlock_t lcore_lock;
	/* Either the plain trunk-array variant or the per-core cache
	 * variant of the union below is used — presumably selected by
	 * cfg.per_core_cache (TODO confirm; the original "Dim of trunk
	 * pointer array" comment here looked stale).
	 */
	union {
		struct {
			uint32_t n_trunk_valid; /* Trunks allocated. */
			uint32_t n_trunk; /* Trunk pointer array size. */
			struct mlx5_indexed_trunk **trunks;
			uint32_t free_list; /* Index to first free trunk. */
		};
		struct {
			struct mlx5_indexed_cache *gc;
			/* Global cache. */
			struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
			/* Local cache. */
			struct rte_bitmap *ibmp;
			void *bmp_mem;
			/* Allocated objects bitmap. Used during flush. */
		};
	};
#ifdef POOL_DEBUG
	uint32_t n_entry;
	uint32_t trunk_new;
	uint32_t trunk_avail;
	uint32_t trunk_empty;
	uint32_t trunk_free;
#endif
	uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */
};

/**
 * Return logarithm of the nearest power of two above input value.
 *
 * @param v
 *   Input value.
 *
 * @return
 *   Logarithm of the nearest power of two above input value.
296 */ 297 static inline unsigned int 298 log2above(unsigned int v) 299 { 300 unsigned int l; 301 unsigned int r; 302 303 for (l = 0, r = 0; (v >> 1); ++l, v >>= 1) 304 r |= (v & 1); 305 return l + r; 306 } 307 308 /********************************* indexed pool *************************/ 309 310 /** 311 * This function allocates non-initialized memory entry from pool. 312 * In NUMA systems, the memory entry allocated resides on the same 313 * NUMA socket as the core that calls this function. 314 * 315 * Memory entry is allocated from memory trunk, no alignment. 316 * 317 * @param pool 318 * Pointer to indexed memory entry pool. 319 * No initialization required. 320 * @param[out] idx 321 * Pointer to memory to save allocated index. 322 * Memory index always positive value. 323 * @return 324 * - Pointer to the allocated memory entry. 325 * - NULL on error. Not enough memory, or invalid arguments. 326 */ 327 void *mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx); 328 329 /** 330 * This function allocates zero initialized memory entry from pool. 331 * In NUMA systems, the memory entry allocated resides on the same 332 * NUMA socket as the core that calls this function. 333 * 334 * Memory entry is allocated from memory trunk, no alignment. 335 * 336 * @param pool 337 * Pointer to indexed memory pool. 338 * No initialization required. 339 * @param[out] idx 340 * Pointer to memory to save allocated index. 341 * Memory index always positive value. 342 * @return 343 * - Pointer to the allocated memory entry . 344 * - NULL on error. Not enough memory, or invalid arguments. 345 */ 346 void *mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx); 347 348 /** 349 * This function frees indexed memory entry to pool. 350 * Caller has to make sure that the index is allocated from same pool. 351 * 352 * @param pool 353 * Pointer to indexed memory pool. 354 * @param idx 355 * Allocated memory entry index. 
356 */ 357 void mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx); 358 359 /** 360 * This function returns pointer of indexed memory entry from index. 361 * Caller has to make sure that the index is valid, and allocated 362 * from same pool. 363 * 364 * @param pool 365 * Pointer to indexed memory pool. 366 * @param idx 367 * Allocated memory index. 368 * @return 369 * - Pointer to indexed memory entry. 370 */ 371 void *mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx); 372 373 /** 374 * This function creates indexed memory pool. 375 * Caller has to configure the configuration accordingly. 376 * 377 * @param pool 378 * Pointer to indexed memory pool. 379 * @param cfg 380 * Allocated memory index. 381 */ 382 struct mlx5_indexed_pool * 383 mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg); 384 385 /** 386 * This function releases all resources of pool. 387 * Caller has to make sure that all indexes and memories allocated 388 * from this pool not referenced anymore. 389 * 390 * @param pool 391 * Pointer to indexed memory pool. 392 * @return 393 * - non-zero value on error. 394 * - 0 on success. 395 */ 396 int mlx5_ipool_destroy(struct mlx5_indexed_pool *pool); 397 398 /** 399 * This function dumps debug info of pool. 400 * 401 * @param pool 402 * Pointer to indexed memory pool. 403 */ 404 void mlx5_ipool_dump(struct mlx5_indexed_pool *pool); 405 406 /** 407 * This function flushes all the cache index back to pool trunk. 408 * 409 * @param pool 410 * Pointer to the index memory pool handler. 411 * 412 */ 413 414 void mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool); 415 416 /** 417 * This function gets the available entry from pos. 418 * 419 * @param pool 420 * Pointer to the index memory pool handler. 421 * @param pos 422 * Pointer to the index position start from. 423 * 424 * @return 425 * - Pointer to the next available entry. 
 *
 */
void *mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos);

/**
 * This function allocates a new empty Three-level table.
 *
 * @param type
 *   The l3t type: word, double word, quad word or pointer, with index.
 *
 * @return
 *   - Pointer to the allocated l3t.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
struct mlx5_l3t_tbl *mlx5_l3t_create(enum mlx5_l3t_type type);

/**
 * This function destroys a Three-level table.
 *
 * @param tbl
 *   Pointer to the l3t.
 */
void mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl);

/**
 * This function gets the index entry from a Three-level table.
 *
 * @param tbl
 *   Pointer to the l3t.
 * @param idx
 *   Index to the entry.
 * @param data
 *   Pointer to the memory which saves the entry data.
 *   When the call returns 0, data contains the entry data read from the
 *   l3t.
 *   When the call returns -1, data is not modified.
 *
 * @return
 *   0 if success, -1 on error.
 */
int32_t mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
			   union mlx5_l3t_data *data);

/**
 * This function decreases the reference counter of an index entry of a
 * Three-level table and clears the entry if the counter reaches 0.
 *
 * @param tbl
 *   Pointer to the l3t.
 * @param idx
 *   Index to the entry.
 *
 * @return
 *   The remaining reference count, 0 means the entry was cleared,
 *   -1 on error.
 */
int32_t mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx);

/**
 * This function sets the index entry of a Three-level table.
 * If the entry is already set, the EEXIST errno will be given, and
 * the already-set data will be filled into data.
 *
 * @param[in] tbl
 *   Pointer to the l3t.
 * @param[in] idx
 *   Index to the entry.
 * @param[in,out] data
 *   Pointer to the memory which contains the entry data to save to l3t.
 * If the entry is already set, the set data will be filled.
 *
 * @return
 *   0 if success, -1 on error.
 */
int32_t mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
			   union mlx5_l3t_data *data);

/**
 * Walk the Three-level table and return a pointer to the data of the
 * first used entry whose index is >= *pos, updating *pos to that
 * entry's index. An entry is treated as unused when its data field is
 * 0 (NULL for the pointer type).
 *
 * @param tbl
 *   Pointer to the l3t.
 * @param[in,out] pos
 *   Index to start the search from; updated to the found entry's index.
 * @return
 *   - Pointer to the entry's data field.
 *   - NULL when no further used entry exists (or tbl is NULL/empty).
 */
static inline void *
mlx5_l3t_get_next(struct mlx5_l3t_tbl *tbl, uint32_t *pos)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	uint32_t i, j, k, g_start, m_start, e_start;
	uint32_t idx = *pos;
	void *e_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;

	if (!tbl)
		return NULL;
	g_tbl = tbl->tbl;
	if (!g_tbl)
		return NULL;
	/* Split the 10-10-12 start index into per-level offsets. */
	g_start = (idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK;
	m_start = (idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK;
	e_start = idx & MLX5_L3T_ET_MASK;
	/*
	 * NOTE(review): m_start/e_start are only reset when a missing
	 * sub-table is skipped, not after a present sub-table has been
	 * scanned to the end; entries below the stale start offsets in
	 * later tables could be skipped — confirm whether the callers'
	 * access pattern (sequentially increasing indexes) guarantees
	 * this cannot happen.
	 */
	for (i = g_start; i < MLX5_L3T_GT_SIZE; i++) {
		m_tbl = g_tbl->tbl[i];
		if (!m_tbl) {
			/* Jump to new table, reset the sub table start. */
			m_start = 0;
			e_start = 0;
			continue;
		}
		for (j = m_start; j < MLX5_L3T_MT_SIZE; j++) {
			if (!m_tbl->tbl[j]) {
				/*
				 * Jump to new table, reset the sub table
				 * start.
				 */
				e_start = 0;
				continue;
			}
			e_tbl = m_tbl->tbl[j];
			switch (tbl->type) {
			case MLX5_L3T_TYPE_WORD:
				w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!w_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return (void *)&w_e_tbl->entry[k].data;
				}
				break;
			case MLX5_L3T_TYPE_DWORD:
				dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!dw_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return (void *)&dw_e_tbl->entry[k].data;
				}
				break;
			case MLX5_L3T_TYPE_QWORD:
				qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!qw_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return (void *)&qw_e_tbl->entry[k].data;
				}
				break;
			default:
				ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!ptr_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return ptr_e_tbl->entry[k].data;
				}
				break;
			}
		}
	}
	return NULL;
}

/*
 * Macros for a doubly linked list based on indexed memory.
 * Index 0 is the list terminator.
 * Example data structure:
 *	struct Foo {
 *		ILIST_ENTRY(uint16_t) next;
 *		...
 *	}
 */
#define ILIST_ENTRY(type) \
	struct { \
		type prev; /* Index of previous element. */ \
		type next; /* Index of next element.
 */ \
	}

/* Insert elem (at pool index idx) at the head of the list rooted at
 * *head. Index 0 acts as the empty-list marker.
 */
#define ILIST_INSERT(pool, head, idx, elem, field) \
	do { \
		typeof(elem) peer; \
		MLX5_ASSERT((elem) && (idx)); \
		(elem)->field.next = *(head); \
		(elem)->field.prev = 0; \
		if (*(head)) { \
			(peer) = mlx5_ipool_get(pool, *(head)); \
			if (peer) \
				(peer)->field.prev = (idx); \
		} \
		*(head) = (idx); \
	} while (0)

/* Unlink elem (at pool index idx) from the list rooted at *head,
 * patching the neighbours' prev/next indexes.
 */
#define ILIST_REMOVE(pool, head, idx, elem, field) \
	do { \
		typeof(elem) peer; \
		MLX5_ASSERT(elem); \
		MLX5_ASSERT(head); \
		if ((elem)->field.prev) { \
			(peer) = mlx5_ipool_get \
				 (pool, (elem)->field.prev); \
			if (peer) \
				(peer)->field.next = (elem)->field.next;\
		} \
		if ((elem)->field.next) { \
			(peer) = mlx5_ipool_get \
				 (pool, (elem)->field.next); \
			if (peer) \
				(peer)->field.prev = (elem)->field.prev;\
		} \
		if (*(head) == (idx)) \
			*(head) = (elem)->field.next; \
	} while (0)

/* Iterate the list: idx/elem are the loop cursor index and pointer. */
#define ILIST_FOREACH(pool, head, idx, elem, field) \
	for ((idx) = (head), (elem) = \
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \
	     idx = (elem)->field.next, (elem) = \
	     (idx) ? mlx5_ipool_get(pool, idx) : NULL)

/* Single index list. */
#define SILIST_ENTRY(type) \
	struct { \
		type next; /* Index of next element. */ \
	}

/* Insert elem (at pool index idx) at the head of the single list. */
#define SILIST_INSERT(head, idx, elem, field) \
	do { \
		MLX5_ASSERT((elem) && (idx)); \
		(elem)->field.next = *(head); \
		*(head) = (idx); \
	} while (0)

/* Iterate the single list: idx/elem are the loop cursor. */
#define SILIST_FOREACH(pool, head, idx, elem, field) \
	for ((idx) = (head), (elem) = \
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \
	     idx = (elem)->field.next, (elem) = \
	     (idx) ? mlx5_ipool_get(pool, idx) : NULL)

/* Iterate every used entry of an L3 table via mlx5_l3t_get_next(). */
#define MLX5_L3T_FOREACH(tbl, idx, entry) \
	for (idx = 0, (entry) = mlx5_l3t_get_next((tbl), &idx); \
	     (entry); \
	     idx++, (entry) = mlx5_l3t_get_next((tbl), &idx))

/* Iterate every allocated entry of an indexed pool; flushes the
 * per-core caches first so the walk sees all entries.
 */
#define MLX5_IPOOL_FOREACH(ipool, idx, entry) \
	for ((idx) = 0, mlx5_ipool_flush_cache((ipool)), \
	    (entry) = mlx5_ipool_get_next((ipool), &idx); \
	    (entry); idx++, (entry) = mlx5_ipool_get_next((ipool), &idx))

#endif /* RTE_PMD_MLX5_UTILS_H_ */