/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_UTILS_H_
#define RTE_PMD_MLX5_UTILS_H_

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <limits.h>
#include <errno.h>

#include <rte_spinlock.h>
#include <rte_rwlock.h>
#include <rte_memory.h>
#include <rte_bitmap.h>

#include <mlx5_common.h>
#include <mlx5_common_utils.h>

#include "mlx5_defs.h"

/* Save and restore errno around argument evaluation. */
#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))
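/*
 * Illustrative sketch (not part of the original header): ERRNO_SAFE()
 * evaluates its argument while preserving the errno value that was set
 * before the call, so a cleanup call cannot clobber the original error
 * cause. The helper name below is hypothetical.
 */
static inline void
errno_safe_demo(const char *tmp_path)
{
	/* remove() may overwrite errno on failure; ERRNO_SAFE restores
	 * the value errno had before the call, keeping the first error. */
	ERRNO_SAFE(remove(tmp_path));
}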

extern int mlx5_logtype;

#define MLX5_NET_LOG_PREFIX "mlx5_net"

/* Generic printf()-like logging macro with automatic line feed. */
#define DRV_LOG(level, ...) \
	PMD_DRV_LOG_(level, mlx5_logtype, MLX5_NET_LOG_PREFIX, \
		__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
		PMD_DRV_LOG_CPAREN)
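
/*
 * Illustrative usage (message and arguments are hypothetical): the first
 * argument is the log level name, the rest is a printf()-style format;
 * the line feed is appended automatically.
 *
 *    DRV_LOG(ERR, "port %u cannot allocate memory", 0);
 *    DRV_LOG(DEBUG, "probing device \"%s\"", "mlx5_0");
 */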

/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
#define DATA_LEN(m) ((m)->data_len)
#define PKT_LEN(m) ((m)->pkt_len)
#define DATA_OFF(m) ((m)->data_off)
#define SET_DATA_OFF(m, o) ((m)->data_off = (o))
#define NB_SEGS(m) ((m)->nb_segs)
#define PORT(m) ((m)->port)

/* Transpose flags. Useful to convert IBV to DPDK flags. */
#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))
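
/*
 * Illustrative expansion (bit values are hypothetical): moving a flag
 * from bit 0x4 to bit 0x40 multiplies the masked value by (0x40 / 0x4);
 * moving it the other way divides instead, so the flag lands on the
 * target bit in both directions.
 *
 *    TRANSPOSE(status, 0x4, 0x40) -> 0x40 when (status & 0x4) is set
 *    TRANSPOSE(status, 0x40, 0x4) -> 0x4  when (status & 0x40) is set
 */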

/*
 * When data entries are keyed by a sequentially increasing index, an
 * array table is more efficient than a hash table for looking up one
 * entry among large numbers of entries. A traditional hash table has a
 * fixed size, so saving huge numbers of entries to it also causes many
 * hash conflicts.
 *
 * However, a simple array table also has a fixed size, and allocating
 * all the needed memory at once wastes a lot of it. When the exact
 * number of entries is unknown, sizing the array up front is impossible.
 *
 * A multi-level table balances these two disadvantages. A global
 * top-level table holding sub-table entries is allocated first, and each
 * sub-table is allocated only once a corresponding index entry needs to
 * be saved. E.g. for a 32-bit index, a three-level table with a 10-10-12
 * split and sequentially increasing indices grows its memory by one 4K
 * entry table at a time.
 *
 * The current implementation introduces a Three-Level table with a
 * 10-10-12 split of the 32-bit index to serve cases with millions of
 * entries to save. Index entries are addressed directly by the index;
 * no search is needed.
 */

/* L3 table global table define. */
#define MLX5_L3T_GT_OFFSET 22
#define MLX5_L3T_GT_SIZE (1 << 10)
#define MLX5_L3T_GT_MASK (MLX5_L3T_GT_SIZE - 1)

/* L3 table middle table define. */
#define MLX5_L3T_MT_OFFSET 12
#define MLX5_L3T_MT_SIZE (1 << 10)
#define MLX5_L3T_MT_MASK (MLX5_L3T_MT_SIZE - 1)

/* L3 table entry table define. */
#define MLX5_L3T_ET_OFFSET 0
#define MLX5_L3T_ET_SIZE (1 << 12)
#define MLX5_L3T_ET_MASK (MLX5_L3T_ET_SIZE - 1)
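
/*
 * Illustrative decomposition sketch (helper name is hypothetical): a
 * 32-bit index splits into the global, middle and entry table slots
 * using the offsets and masks defined above.
 */
static inline void
mlx5_l3t_split_demo(uint32_t idx)
{
	uint32_t g_slot = (idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK;
	uint32_t m_slot = (idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK;
	uint32_t e_slot = idx & MLX5_L3T_ET_MASK;

	/* E.g. idx 0x00403001 -> g_slot 1, m_slot 3, e_slot 1. */
	(void)g_slot;
	(void)m_slot;
	(void)e_slot;
}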

/* L3 table type. */
enum mlx5_l3t_type {
	MLX5_L3T_TYPE_WORD = 0,
	MLX5_L3T_TYPE_DWORD,
	MLX5_L3T_TYPE_QWORD,
	MLX5_L3T_TYPE_PTR,
	MLX5_L3T_TYPE_MAX,
};

struct mlx5_indexed_pool;

/* Generic data struct. */
union mlx5_l3t_data {
	uint16_t word;
	uint32_t dword;
	uint64_t qword;
	void *ptr;
};

/* L3 level table data structure. */
struct mlx5_l3t_level_tbl {
	uint64_t ref_cnt; /* Table ref_cnt. */
	void *tbl[]; /* Table array. */
};

/* L3 word entry table data structure. */
struct __rte_packed_begin mlx5_l3t_entry_word {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint16_t data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array */
} __rte_packed_end;

/* L3 double word entry table data structure. */
struct __rte_packed_begin mlx5_l3t_entry_dword {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint32_t data;
		int32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array */
} __rte_packed_end;

/* L3 quad word entry table data structure. */
struct __rte_packed_begin mlx5_l3t_entry_qword {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint64_t data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array */
} __rte_packed_end;

/* L3 pointer entry table data structure. */
struct __rte_packed_begin mlx5_l3t_entry_ptr {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		void *data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array */
} __rte_packed_end;

/* L3 table data structure. */
struct mlx5_l3t_tbl {
	enum mlx5_l3t_type type; /* Table type. */
	struct mlx5_indexed_pool *eip;
	/* Table index pool handles. */
	struct mlx5_l3t_level_tbl *tbl; /* Global table index. */
	rte_spinlock_t sl; /* The table lock. */
};
/** Type of callback used to allocate the data of a new entry. */
typedef int32_t (*mlx5_l3t_alloc_callback_fn)(void *ctx,
					   union mlx5_l3t_data *data);

/*
 * The default ipool threshold value indicates which per_core_cache
 * value to set.
 */
#define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
/* The default minimum local cache size. */
#define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)

/*
 * An indexed memory entry index is made up of the trunk index and the
 * offset of the entry within the trunk. Since the entry index is 32 bits,
 * a user who prefers small trunks can increase the macro below so that
 * the pool can contain more trunks, with lots of entries allocated.
 */
#define TRUNK_IDX_BITS 16
#define TRUNK_MAX_IDX ((1 << TRUNK_IDX_BITS) - 1)
#define TRUNK_INVALID TRUNK_MAX_IDX
#define MLX5_IPOOL_DEFAULT_TRUNK_SIZE (1 << (28 - TRUNK_IDX_BITS))
#ifdef RTE_LIBRTE_MLX5_DEBUG
#define POOL_DEBUG 1
#endif

struct mlx5_indexed_pool_config {
	uint32_t size; /* Pool entry size. */
	uint32_t trunk_size:22;
	/*
	 * Trunk entry number. Must be a power of 2. It can be increased
	 * if trunk growing is enabled. The trunk entry number grows by
	 * the grow_shift left shift. Trunks with an index beyond
	 * grow_trunk keep the same entry number as the last grow trunk.
	 */
	uint32_t grow_trunk:4;
	/*
	 * Number of trunks with a growing entry number in the pool. Set
	 * it to 0 to make the pool work as a fixed trunk-entry pool. It
	 * works only if grow_shift is not 0.
	 */
	uint32_t grow_shift:4;
	/*
	 * Trunk entry number growth shift value; growth stops after
	 * grow_trunk trunks. It works only if grow_trunk is not 0.
	 */
	uint32_t need_lock:1;
	/* Lock is needed for multiple thread usage. */
	uint32_t release_mem_en:1; /* Release trunk memory when it is free. */
	uint32_t max_idx; /* The maximum index that can be allocated. */
	uint32_t per_core_cache;
	/*
	 * Cache entry number per core for performance. Should not be
	 * set together with release_mem_en.
	 */
	const char *type; /* Memory allocation type name. */
	void *(*malloc)(uint32_t flags, size_t size, unsigned int align,
			int socket);
	/* User defined memory allocator. */
	void (*free)(void *addr); /* User defined memory release. */
};
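
/*
 * A minimal configuration sketch (field values are illustrative, not
 * recommendations): a pool of 64-byte entries in 1K-entry trunks,
 * protected by a lock and backed by the common mlx5 allocator declared
 * in mlx5_malloc.h.
 */
static struct mlx5_indexed_pool_config mlx5_ipool_demo_cfg = {
	.size = 64,            /* Entry size in bytes. */
	.trunk_size = 1 << 10, /* Power of 2 entries per trunk. */
	.grow_trunk = 3,       /* First three trunks may grow. */
	.grow_shift = 2,       /* Each growing trunk is 4x larger. */
	.need_lock = 1,        /* Safe for multiple threads. */
	.max_idx = 1 << 20,    /* Upper bound on allocated indices. */
	.type = "demo_ipool",
	.malloc = mlx5_malloc, /* Allocator from mlx5_common. */
	.free = mlx5_free,
};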

struct mlx5_indexed_trunk {
	uint32_t idx; /* Trunk id. */
	uint32_t prev; /* Previous free trunk in free list. */
	uint32_t next; /* Next free trunk in free list. */
	uint32_t free; /* Free entries available. */
	struct rte_bitmap *bmp;
	alignas(RTE_CACHE_LINE_SIZE) uint8_t data[]; /* Entry data start. */
};

struct mlx5_indexed_cache {
	struct mlx5_indexed_trunk **trunks;
	volatile RTE_ATOMIC(uint32_t) n_trunk_valid; /* Trunks allocated. */
	uint32_t n_trunk; /* Trunk pointer array size. */
	uint32_t ref_cnt;
	uint32_t len;
	uint32_t idx[];
};

struct mlx5_ipool_per_lcore {
	struct mlx5_indexed_cache *lc;
	uint32_t len; /**< Current cache count. */
	uint32_t idx[]; /**< Cache objects. */
};

struct mlx5_indexed_pool {
	struct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */
	rte_spinlock_t rsz_lock; /* Pool lock for multiple thread usage. */
	rte_spinlock_t lcore_lock;
	/* Dim of trunk pointer array. */
	union {
		struct {
			uint32_t n_trunk_valid; /* Trunks allocated. */
			uint32_t n_trunk; /* Trunk pointer array size. */
			struct mlx5_indexed_trunk **trunks;
			uint32_t free_list; /* Index to first free trunk. */
		};
		struct {
			RTE_ATOMIC(struct mlx5_indexed_cache *) gc;
			/* Global cache. */
			struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
			/* Local cache. */
			struct rte_bitmap *ibmp;
			void *bmp_mem;
			/* Allocated objects bitmap. Used during flush. */
		};
	};
#ifdef POOL_DEBUG
	uint32_t n_entry;
	uint32_t trunk_new;
	uint32_t trunk_avail;
	uint32_t trunk_empty;
	uint32_t trunk_free;
#endif
	uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */
};

/**
 * Return logarithm of the nearest power of two above input value.
 *
 * @param v
 *   Input value.
 *
 * @return
 *   Logarithm of the nearest power of two above input value.
 */
static inline unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}
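
/*
 * Illustrative checks (helper name is hypothetical): exact powers of two
 * map to their exponent, while in-between values round up.
 */
static inline void
log2above_demo(void)
{
	MLX5_ASSERT(log2above(1) == 0); /* 2^0 */
	MLX5_ASSERT(log2above(8) == 3); /* 2^3 exactly */
	MLX5_ASSERT(log2above(9) == 4); /* rounds up to 2^4 */
}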

/********************************* indexed pool *************************/

/**
 * This function allocates a non-initialized memory entry from the pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * The memory entry is allocated from a memory trunk, with no alignment.
 *
 * @param pool
 *   Pointer to the indexed memory entry pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save the allocated index.
 *   The memory index is always a positive value.
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx);

/**
 * This function allocates a zero-initialized memory entry from the pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * The memory entry is allocated from a memory trunk, with no alignment.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save the allocated index.
 *   The memory index is always a positive value.
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx);

/**
 * This function frees an indexed memory entry to the pool.
 * The caller has to make sure that the index is allocated from the
 * same pool.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 * @param idx
 *   Allocated memory entry index.
 */
void mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx);

/**
 * This function returns the pointer of an indexed memory entry from its
 * index. The caller has to make sure that the index is valid and
 * allocated from the same pool.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 * @param idx
 *   Allocated memory index.
 * @return
 *   - Pointer to the indexed memory entry.
 */
void *mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx);

/**
 * This function creates an indexed memory pool.
 * The caller has to fill in the configuration accordingly.
 *
 * @param cfg
 *   Pointer to the indexed memory pool configuration.
 * @return
 *   - Pointer to the created pool.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg);

/**
 * This function releases all resources of the pool.
 * The caller has to make sure that no index or memory allocated
 * from this pool is referenced anymore.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 * @return
 *   - non-zero value on error.
 *   - 0 on success.
 */
int mlx5_ipool_destroy(struct mlx5_indexed_pool *pool);
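
/*
 * Hedged end-to-end sketch of the ipool API (helper name is hypothetical,
 * configuration taken from the mlx5_ipool_demo_cfg sketch above): create
 * a pool, allocate one zeroed entry, look it up by index and release
 * everything.
 */
static inline void
mlx5_ipool_demo(void)
{
	struct mlx5_indexed_pool *pool;
	void *entry;
	uint32_t idx;

	pool = mlx5_ipool_create(&mlx5_ipool_demo_cfg);
	if (pool == NULL)
		return;
	entry = mlx5_ipool_zmalloc(pool, &idx);
	if (entry != NULL) {
		/* The same entry can be fetched again by its index. */
		MLX5_ASSERT(mlx5_ipool_get(pool, idx) == entry);
		mlx5_ipool_free(pool, idx);
	}
	mlx5_ipool_destroy(pool);
}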

/**
 * This function dumps debug info of the pool.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 */
void mlx5_ipool_dump(struct mlx5_indexed_pool *pool);

/**
 * This function flushes all the cached indices back to the pool trunks.
 *
 * @param pool
 *   Pointer to the index memory pool handler.
 */
void mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool);

/**
 * This function gets the next available entry from pos.
 *
 * @param pool
 *   Pointer to the index memory pool handler.
 * @param pos
 *   Pointer to the index position to start from.
 *
 * @return
 *  - Pointer to the next available entry.
 */
void *mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos);

/**
 * This function resizes the ipool.
 *
 * @param pool
 *   Pointer to the index memory pool handler.
 * @param num_entries
 *   Number of entries to be added to the pool.
 *   This number should be divisible by trunk_size.
 * @param error
 *   Pointer to the error reporting structure.
 *
 * @return
 *   - non-zero value on error.
 *   - 0 on success.
 */
int mlx5_ipool_resize(struct mlx5_indexed_pool *pool, uint32_t num_entries,
		      struct rte_flow_error *error);

/**
 * This function allocates a new empty Three-level table.
 *
 * @param type
 *   Type of the l3t entries: word, double word, quad word or pointer.
 *
 * @return
 *   - Pointer to the allocated l3t.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
struct mlx5_l3t_tbl *mlx5_l3t_create(enum mlx5_l3t_type type);

/**
 * This function destroys the Three-level table.
 *
 * @param tbl
 *   Pointer to the l3t.
 */
void mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl);

/**
 * This function gets an index entry from the Three-level table.
 *
 * @param tbl
 *   Pointer to the l3t.
 * @param idx
 *   Index to the entry.
 * @param data
 *   Pointer to the memory which saves the entry data.
 *   When the function returns 0, data contains the entry data read from
 *   the l3t.
 *   When the function returns -1, data is not modified.
 *
 * @return
 *   0 if success, -1 on error.
 */
int32_t mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
			    union mlx5_l3t_data *data);

/**
 * This function decreases the reference counter of an index entry in the
 * Three-level table and clears the entry if the counter reaches 0.
 *
 * @param tbl
 *   Pointer to the l3t.
 * @param idx
 *   Index to the entry.
 *
 * @return
 *   The remaining reference count; 0 means the entry was cleared, -1 on
 *   error.
 */
int32_t mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx);

/**
 * This function sets an index entry in the Three-level table.
 * If the entry is already set, errno is set to EEXIST and the data
 * already stored in the entry is returned through the data argument.
 *
 * @param tbl[in]
 *   Pointer to the l3t.
 * @param idx[in]
 *   Index to the entry.
 * @param data[in/out]
 *   Pointer to the memory which contains the entry data to save to the
 *   l3t. If the entry is already set, the existing data is filled in.
 *
 * @return
 *   0 if success, -1 on error.
 */
int32_t mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
			    union mlx5_l3t_data *data);
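
/*
 * Hedged usage sketch of the L3T API (helper name and index value are
 * hypothetical): store a pointer under index 42, read it back, then drop
 * the references.
 */
static inline void
mlx5_l3t_demo(void *obj)
{
	struct mlx5_l3t_tbl *tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);
	union mlx5_l3t_data data = { .ptr = obj };

	if (tbl == NULL)
		return;
	if (mlx5_l3t_set_entry(tbl, 42, &data) == 0) {
		data.ptr = NULL;
		if (mlx5_l3t_get_entry(tbl, 42, &data) == 0) {
			MLX5_ASSERT(data.ptr == obj);
			/* Assumed: a successful get takes a reference too. */
			mlx5_l3t_clear_entry(tbl, 42);
		}
		mlx5_l3t_clear_entry(tbl, 42); /* Drop the set reference. */
	}
	mlx5_l3t_destroy(tbl);
}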

static inline void *
mlx5_l3t_get_next(struct mlx5_l3t_tbl *tbl, uint32_t *pos)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	uint32_t i, j, k, g_start, m_start, e_start;
	uint32_t idx = *pos;
	void *e_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;

	if (!tbl)
		return NULL;
	g_tbl = tbl->tbl;
	if (!g_tbl)
		return NULL;
	g_start = (idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK;
	m_start = (idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK;
	e_start = idx & MLX5_L3T_ET_MASK;
	for (i = g_start; i < MLX5_L3T_GT_SIZE; i++) {
		m_tbl = g_tbl->tbl[i];
		if (!m_tbl) {
			/* Jump to new table, reset the sub table start. */
			m_start = 0;
			e_start = 0;
			continue;
		}
		for (j = m_start; j < MLX5_L3T_MT_SIZE; j++) {
			if (!m_tbl->tbl[j]) {
				/*
				 * Jump to new table, reset the sub table
				 * start.
				 */
				e_start = 0;
				continue;
			}
			e_tbl = m_tbl->tbl[j];
			switch (tbl->type) {
			case MLX5_L3T_TYPE_WORD:
				w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!w_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return (void *)&w_e_tbl->entry[k].data;
				}
				break;
			case MLX5_L3T_TYPE_DWORD:
				dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!dw_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return (void *)&dw_e_tbl->entry[k].data;
				}
				break;
			case MLX5_L3T_TYPE_QWORD:
				qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!qw_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return (void *)&qw_e_tbl->entry[k].data;
				}
				break;
			default:
				ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!ptr_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return ptr_e_tbl->entry[k].data;
				}
				break;
			}
		}
	}
	return NULL;
}

/*
 * Macros for a linked list based on indexed memory.
 * Example data structure:
 * struct Foo {
 *	ILIST_ENTRY(uint16_t) next;
 *	...
 * }
 */
#define ILIST_ENTRY(type)						\
struct {								\
	type prev; /* Index of previous element. */			\
	type next; /* Index of next element. */				\
}

#define ILIST_INSERT(pool, head, idx, elem, field)			\
	do {								\
		typeof(elem) peer;					\
		MLX5_ASSERT((elem) && (idx));				\
		(elem)->field.next = *(head);				\
		(elem)->field.prev = 0;					\
		if (*(head)) {						\
			(peer) = mlx5_ipool_get(pool, *(head));		\
			if (peer)					\
				(peer)->field.prev = (idx);		\
		}							\
		*(head) = (idx);					\
	} while (0)

#define ILIST_REMOVE(pool, head, idx, elem, field)			\
	do {								\
		typeof(elem) peer;					\
		MLX5_ASSERT(elem);					\
		MLX5_ASSERT(head);					\
		if ((elem)->field.prev) {				\
			(peer) = mlx5_ipool_get				\
				 (pool, (elem)->field.prev);		\
			if (peer)					\
				(peer)->field.next = (elem)->field.next;\
		}							\
		if ((elem)->field.next) {				\
			(peer) = mlx5_ipool_get				\
				 (pool, (elem)->field.next);		\
			if (peer)					\
				(peer)->field.prev = (elem)->field.prev;\
		}							\
		if (*(head) == (idx))					\
			*(head) = (elem)->field.next;			\
	} while (0)

#define ILIST_FOREACH(pool, head, idx, elem, field)			\
	for ((idx) = (head), (elem) =					\
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem);	\
	     idx = (elem)->field.next, (elem) =				\
	     (idx) ? mlx5_ipool_get(pool, idx) : NULL)
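
/*
 * Hedged usage sketch for the indexed linked list macros (struct name,
 * pool and head are hypothetical): elements live in an ipool and link to
 * each other by index through an ILIST_ENTRY field.
 */
struct mlx5_ilist_demo_elem {
	ILIST_ENTRY(uint32_t) link;
	uint32_t value;
};

static inline void
mlx5_ilist_demo(struct mlx5_indexed_pool *pool, uint32_t *head)
{
	struct mlx5_ilist_demo_elem *elem;
	uint32_t idx;

	elem = mlx5_ipool_zmalloc(pool, &idx);
	if (elem == NULL)
		return;
	ILIST_INSERT(pool, head, idx, elem, link);
	/* Walk the list; idx and elem are updated by the iterator. */
	ILIST_FOREACH(pool, *head, idx, elem, link)
		elem->value++;
	/* Unlink and release the head element again. */
	idx = *head;
	elem = mlx5_ipool_get(pool, idx);
	if (elem != NULL) {
		ILIST_REMOVE(pool, head, idx, elem, link);
		mlx5_ipool_free(pool, idx);
	}
}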

/* Single index list. */
#define SILIST_ENTRY(type)						\
struct {								\
	type next; /* Index of next element. */				\
}

#define SILIST_INSERT(head, idx, elem, field)				\
	do {								\
		MLX5_ASSERT((elem) && (idx));				\
		(elem)->field.next = *(head);				\
		*(head) = (idx);					\
	} while (0)

#define SILIST_FOREACH(pool, head, idx, elem, field)			\
	for ((idx) = (head), (elem) =					\
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem);	\
	     idx = (elem)->field.next, (elem) =				\
	     (idx) ? mlx5_ipool_get(pool, idx) : NULL)

#define MLX5_L3T_FOREACH(tbl, idx, entry)				\
	for (idx = 0, (entry) = mlx5_l3t_get_next((tbl), &idx);		\
	     (entry);							\
	     idx++, (entry) = mlx5_l3t_get_next((tbl), &idx))

#define MLX5_IPOOL_FOREACH(ipool, idx, entry)				\
	for ((idx) = 0, mlx5_ipool_flush_cache((ipool)),		\
	    (entry) = mlx5_ipool_get_next((ipool), &idx);		\
	    (entry); idx++, (entry) = mlx5_ipool_get_next((ipool), &idx))
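
/*
 * Hedged iteration sketch (helper name is hypothetical): visit every
 * allocated entry of an ipool; the foreach macro flushes the per-core
 * caches first so the walk sees all entries.
 */
static inline void
mlx5_ipool_walk_demo(struct mlx5_indexed_pool *pool)
{
	void *entry;
	uint32_t idx;

	MLX5_IPOOL_FOREACH(pool, idx, entry) {
		/* entry points to the object stored at index idx. */
		(void)entry;
	}
}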

#endif /* RTE_PMD_MLX5_UTILS_H_ */