/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_UTILS_H_
#define RTE_PMD_MLX5_UTILS_H_

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <limits.h>
#include <errno.h>

#include <rte_spinlock.h>
#include <rte_rwlock.h>
#include <rte_memory.h>
#include <rte_bitmap.h>

#include <mlx5_common.h>
#include <mlx5_common_utils.h>

#include "mlx5_defs.h"

/* Convert a bit number to the corresponding 64-bit mask. */
#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))

/* Save and restore errno around argument evaluation. */
#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))
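
/*
 * Illustrative usage sketch (not part of this header; fd and ret are
 * hypothetical): ERRNO_SAFE() evaluates its argument while keeping the
 * caller's errno intact, so a cleanup call cannot clobber the error code
 * being reported.
 *
 *	int ret = -errno;       // error code to preserve
 *	ERRNO_SAFE(close(fd));  // close() may set errno; it is restored
 *	return ret;             // errno still reflects the original failure
 */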

extern int mlx5_logtype;

#define MLX5_NET_LOG_PREFIX "mlx5_net"

/* Generic printf()-like logging macro with automatic line feed. */
#define DRV_LOG(level, ...) \
	PMD_DRV_LOG_(level, mlx5_logtype, MLX5_NET_LOG_PREFIX, \
		__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
		PMD_DRV_LOG_CPAREN)

/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
#define DATA_LEN(m) ((m)->data_len)
#define PKT_LEN(m) ((m)->pkt_len)
#define DATA_OFF(m) ((m)->data_off)
#define SET_DATA_OFF(m, o) ((m)->data_off = (o))
#define NB_SEGS(m) ((m)->nb_segs)
#define PORT(m) ((m)->port)

/* Transpose flags. Useful to convert IBV to DPDK flags. */
#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))
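
/*
 * Example with illustrative mask values only: moving a flag from bit 2
 * of a source word to bit 6 of a destination word,
 *
 *	TRANSPOSE(flags, 0x4, 0x40)
 *
 * expands to ((flags & 0x4) * (0x40 / 0x4)), so a set source bit yields
 * exactly the destination bit and a clear one yields 0. Both masks must
 * be single bits (or one must be a multiple of the other).
 */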

/*
 * When data entries are keyed by a sequentially increasing index, an
 * array table is more efficient than a hash table for looking up one
 * entry among large numbers of entries: a traditional hash table has a
 * fixed size, so storing huge numbers of entries in it also causes many
 * hash collisions.
 *
 * A plain array table has a fixed size too, and allocating all the
 * needed memory at once wastes a lot of it. When the exact number of
 * entries is unknown in advance, sizing such an array is impossible.
 *
 * A multi-level table balances these two disadvantages. A global top
 * level table holding sub-table entries is allocated first, and each
 * sub-table is allocated only once an index falling into it needs to be
 * saved. E.g. for a 32-bit index, with a three level table and a
 * 10-10-12 bit split, sequentially increasing indices make the memory
 * grow in steps of 4K entries.
 *
 * The current implementation is a Three-Level table with a 10-10-12
 * split of the 32-bit index, intended for cases with millions of
 * entries to save. Entries are addressed directly by index; no search
 * is needed.
 */
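
/*
 * A minimal sketch of how a 32-bit index is decomposed with the 10-10-12
 * split (using the MLX5_L3T_* offset/mask macros defined just below):
 *
 *	uint32_t idx = 0x00ABCDEF; // arbitrary example index
 *	uint32_t g = (idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK;
 *	uint32_t m = (idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK;
 *	uint32_t e = idx & MLX5_L3T_ET_MASK;
 *
 * g selects the sub-table in the global table, m selects the entry table
 * within that sub-table, and e selects the entry itself.
 */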

/* L3 table global table definitions. */
#define MLX5_L3T_GT_OFFSET 22
#define MLX5_L3T_GT_SIZE (1 << 10)
#define MLX5_L3T_GT_MASK (MLX5_L3T_GT_SIZE - 1)

/* L3 table middle table definitions. */
#define MLX5_L3T_MT_OFFSET 12
#define MLX5_L3T_MT_SIZE (1 << 10)
#define MLX5_L3T_MT_MASK (MLX5_L3T_MT_SIZE - 1)

/* L3 table entry table definitions. */
#define MLX5_L3T_ET_OFFSET 0
#define MLX5_L3T_ET_SIZE (1 << 12)
#define MLX5_L3T_ET_MASK (MLX5_L3T_ET_SIZE - 1)

/* L3 table type. */
enum mlx5_l3t_type {
	MLX5_L3T_TYPE_WORD = 0,
	MLX5_L3T_TYPE_DWORD,
	MLX5_L3T_TYPE_QWORD,
	MLX5_L3T_TYPE_PTR,
	MLX5_L3T_TYPE_MAX,
};

struct mlx5_indexed_pool;

/* Generic data struct. */
union mlx5_l3t_data {
	uint16_t word;
	uint32_t dword;
	uint64_t qword;
	void *ptr;
};

/* L3 level table data structure. */
struct mlx5_l3t_level_tbl {
	uint64_t ref_cnt; /* Table ref_cnt. */
	void *tbl[]; /* Table array. */
};

/* L3 word entry table data structure. */
struct mlx5_l3t_entry_word {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint16_t data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 double word entry table data structure. */
struct mlx5_l3t_entry_dword {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint32_t data;
		int32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 quad word entry table data structure. */
struct mlx5_l3t_entry_qword {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint64_t data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 pointer entry table data structure. */
struct mlx5_l3t_entry_ptr {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		void *data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 table data structure. */
struct mlx5_l3t_tbl {
	enum mlx5_l3t_type type; /* Table type. */
	struct mlx5_indexed_pool *eip;
	/* Index pool for the entry tables. */
	struct mlx5_l3t_level_tbl *tbl; /* Pointer to the global table. */
	rte_spinlock_t sl; /* The table lock. */
};

/** Type of callback function used to allocate entry data on demand. */
typedef int32_t (*mlx5_l3t_alloc_callback_fn)(void *ctx,
					   union mlx5_l3t_data *data);

/*
 * The default ipool threshold value; it indicates which per_core_cache
 * value to set.
 */
#define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
/* The default minimum local cache size. */
#define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)

/*
 * An indexed memory entry index is made up of the trunk index and the
 * offset of the entry within the trunk. Since the entry index is 32
 * bits, a user who prefers small trunks can raise the macro below so
 * that the pool holds more trunks while still having lots of entries
 * allocated. With the default TRUNK_IDX_BITS of 16, a default trunk
 * holds 1 << (28 - 16) = 4096 entries.
 */
#define TRUNK_IDX_BITS 16
#define TRUNK_MAX_IDX ((1 << TRUNK_IDX_BITS) - 1)
#define TRUNK_INVALID TRUNK_MAX_IDX
#define MLX5_IPOOL_DEFAULT_TRUNK_SIZE (1 << (28 - TRUNK_IDX_BITS))
#ifdef RTE_LIBRTE_MLX5_DEBUG
#define POOL_DEBUG 1
#endif

struct mlx5_indexed_pool_config {
	uint32_t size; /* Pool entry size. */
	uint32_t trunk_size:22;
	/*
	 * Trunk entry number. Must be a power of 2. It can be increased
	 * if trunk growing is enabled. The trunk entry number increases
	 * by left-shifting with grow_shift. Trunks with an index beyond
	 * grow_trunk keep the same entry number as the last grown trunk.
	 */
	uint32_t grow_trunk:4;
	/*
	 * Number of trunks with increasing entry number in the pool. Set
	 * it to 0 to make the pool work as a fixed trunk-entry pool. It
	 * takes effect only if grow_shift is not 0.
	 */
	uint32_t grow_shift:4;
	/*
	 * Shift value by which the trunk entry number increases, stopping
	 * after grow_trunk trunks. It takes effect only if grow_trunk is
	 * not 0.
	 */
	uint32_t need_lock:1;
	/* Lock is needed for multiple thread usage. */
	uint32_t release_mem_en:1; /* Release trunk when it is free. */
	uint32_t max_idx; /* The maximum index that can be allocated. */
	uint32_t per_core_cache;
	/*
	 * Cache entry number per core for performance. Should not be
	 * set together with release_mem_en.
	 */
	const char *type; /* Memory allocation type name. */
	void *(*malloc)(uint32_t flags, size_t size, unsigned int align,
			int socket);
	/* User defined memory allocator. */
	void (*free)(void *addr); /* User defined memory release. */
};
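
/*
 * A minimal configuration sketch (all values are illustrative; the
 * "example_pool" name is an assumption, mlx5_malloc/mlx5_free come from
 * mlx5_common and match the callback signatures above):
 *
 *	struct mlx5_indexed_pool_config cfg = {
 *		.size = sizeof(uint64_t), // hypothetical entry type
 *		.trunk_size = 64,         // 64 entries in the first trunk
 *		.grow_trunk = 3,          // first 3 trunks may grow
 *		.grow_shift = 2,          // each grown trunk is 4x larger
 *		.need_lock = 1,           // pool used from multiple threads
 *		.release_mem_en = 1,      // free trunk memory when empty
 *		.type = "example_pool",
 *		.malloc = mlx5_malloc,
 *		.free = mlx5_free,
 *	};
 */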

struct mlx5_indexed_trunk {
	uint32_t idx; /* Trunk id. */
	uint32_t prev; /* Previous free trunk in free list. */
	uint32_t next; /* Next free trunk in free list. */
	uint32_t free; /* Free entries available. */
	struct rte_bitmap *bmp;
	alignas(RTE_CACHE_LINE_SIZE) uint8_t data[]; /* Entry data start. */
};

struct mlx5_indexed_cache {
	struct mlx5_indexed_trunk **trunks;
	volatile RTE_ATOMIC(uint32_t) n_trunk_valid; /* Trunks allocated. */
	uint32_t n_trunk; /* Trunk pointer array size. */
	uint32_t ref_cnt;
	uint32_t len;
	uint32_t idx[];
};

struct mlx5_ipool_per_lcore {
	struct mlx5_indexed_cache *lc;
	uint32_t len; /**< Current cache count. */
	uint32_t idx[]; /**< Cache objects. */
};

struct mlx5_indexed_pool {
	struct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */
	rte_spinlock_t rsz_lock; /* Pool lock for multiple thread usage. */
	rte_spinlock_t lcore_lock; /* Lock for the per-lcore caches. */
	union {
		struct {
			uint32_t n_trunk_valid; /* Trunks allocated. */
			uint32_t n_trunk; /* Dim of trunk pointer array. */
			struct mlx5_indexed_trunk **trunks;
			uint32_t free_list; /* Index to first free trunk. */
		};
		struct {
			RTE_ATOMIC(struct mlx5_indexed_cache *) gc;
			/* Global cache. */
			struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
			/* Local cache. */
			struct rte_bitmap *ibmp;
			void *bmp_mem;
			/* Allocated objects bitmap. Used during flush. */
		};
	};
#ifdef POOL_DEBUG
	uint32_t n_entry;
	uint32_t trunk_new;
	uint32_t trunk_avail;
	uint32_t trunk_empty;
	uint32_t trunk_free;
#endif
	uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */
};

/**
 * Return the ceiling of the base-2 logarithm of the input value, i.e.
 * the exponent of the nearest power of two greater than or equal to it.
 *
 * @param v
 *   Input value.
 *
 * @return
 *   Ceiling of log2(v).
 */
static inline unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}
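
/*
 * For instance, log2above(1) == 0, log2above(4) == 2 and
 * log2above(5) == 3: l counts the position of the highest set bit while
 * r records whether any lower bit is set, rounding the result up for
 * values that are not powers of two.
 */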

/********************************* indexed pool *************************/

/**
 * This function allocates a non-initialized memory entry from the pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * Memory entry is allocated from memory trunk, no alignment.
 *
 * @param pool
 *   Pointer to indexed memory entry pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save allocated index.
 *   Memory index is always a positive value.
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx);

/**
 * This function allocates a zero-initialized memory entry from the pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * Memory entry is allocated from memory trunk, no alignment.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save allocated index.
 *   Memory index is always a positive value.
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx);

/**
 * This function returns an indexed memory entry to the pool.
 * The caller has to make sure that the index was allocated from the
 * same pool.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 * @param idx
 *   Allocated memory entry index.
 */
void mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx);

/**
 * This function returns a pointer to the indexed memory entry for an
 * index. The caller has to make sure that the index is valid and was
 * allocated from the same pool.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 * @param idx
 *   Allocated memory index.
 * @return
 *   - Pointer to indexed memory entry.
 */
void *mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx);

/**
 * This function creates an indexed memory pool.
 * The caller has to fill in the configuration accordingly.
 *
 * @param cfg
 *   Pointer to the indexed memory pool configuration.
 * @return
 *   - Pointer to the created pool.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg);

/**
 * This function releases all resources of the pool.
 * The caller has to make sure that no index or memory allocated
 * from this pool is referenced anymore.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 * @return
 *   - non-zero value on error.
 *   - 0 on success.
 */
int mlx5_ipool_destroy(struct mlx5_indexed_pool *pool);
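
/*
 * A minimal allocation lifecycle sketch (the entry type and the pool
 * variable are hypothetical; cfg is filled in as shown above):
 *
 *	uint32_t idx;
 *	struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);
 *	uint64_t *entry = mlx5_ipool_zmalloc(pool, &idx);
 *	if (entry) {
 *		// ... use *entry; later, look it up again by index:
 *		entry = mlx5_ipool_get(pool, idx);
 *		mlx5_ipool_free(pool, idx);
 *	}
 *	mlx5_ipool_destroy(pool);
 */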

/**
 * This function dumps debug info of the pool.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 */
void mlx5_ipool_dump(struct mlx5_indexed_pool *pool);

/**
 * This function flushes all the cached indices back to the pool trunks.
 *
 * @param pool
 *   Pointer to the index memory pool handler.
 */
void mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool);

/**
 * This function gets the next available entry, starting from position
 * *pos.
 *
 * @param pool
 *   Pointer to the index memory pool handler.
 * @param pos
 *   Pointer to the index position to start from; updated to the index
 *   of the returned entry.
 *
 * @return
 *  - Pointer to the next available entry, or NULL if none is left.
 */
void *mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos);

/**
 * This function resizes the ipool.
 *
 * @param pool
 *   Pointer to the index memory pool handler.
 * @param num_entries
 *   Number of entries to be added to the pool.
 *   This number should be divisible by trunk_size.
 *
 * @return
 *   - non-zero value on error.
 *   - 0 on success.
 */
int mlx5_ipool_resize(struct mlx5_indexed_pool *pool, uint32_t num_entries);

/**
 * This function allocates a new empty Three-level table.
 *
 * @param type
 *   The l3t type: word, double word, quad word or pointer with index.
 *
 * @return
 *   - Pointer to the allocated l3t.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
struct mlx5_l3t_tbl *mlx5_l3t_create(enum mlx5_l3t_type type);

/**
 * This function destroys the Three-level table.
 *
 * @param tbl
 *   Pointer to the l3t.
 */
void mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl);

/**
 * This function gets an index entry from the Three-level table.
 *
 * @param tbl
 *   Pointer to the l3t.
 * @param idx
 *   Index to the entry.
 * @param data
 *   Pointer to the memory which saves the entry data.
 *   When the function returns 0, data contains the entry data read from
 *   the l3t.
 *   When the function returns -1, data is not modified.
 *
 * @return
 *   0 on success, -1 on error.
 */
int32_t mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
			    union mlx5_l3t_data *data);

/**
 * This function decreases the reference counter of an index entry in the
 * Three-level table and clears the entry when the counter reaches 0.
 *
 * @param tbl
 *   Pointer to the l3t.
 * @param idx
 *   Index to the entry.
 *
 * @return
 *   The remaining reference count; 0 means the entry was cleared, -1 on
 *   error.
 */
int32_t mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx);

/**
 * This function sets an index entry in the Three-level table.
 * If the entry is already set, errno is set to EEXIST and the already
 * stored data is copied back to the caller's data.
 *
 * @param tbl[in]
 *   Pointer to the l3t.
 * @param idx[in]
 *   Index to the entry.
 * @param data[in/out]
 *   Pointer to the memory which contains the entry data to save to the
 *   l3t. If the entry is already set, it is filled with the stored data.
 *
 * @return
 *   0 on success, -1 on error.
 */
int32_t mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
			    union mlx5_l3t_data *data);
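
/*
 * A minimal usage sketch (values are illustrative; reference-count
 * handling is elided):
 *
 *	union mlx5_l3t_data data = { .dword = 0xdeadbeef };
 *	struct mlx5_l3t_tbl *tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
 *	if (tbl && mlx5_l3t_set_entry(tbl, 42, &data) == 0) {
 *		// ... use the entry ...
 *		mlx5_l3t_clear_entry(tbl, 42); // drops the reference
 *	}
 *	mlx5_l3t_destroy(tbl);
 */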

static inline void *
mlx5_l3t_get_next(struct mlx5_l3t_tbl *tbl, uint32_t *pos)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	uint32_t i, j, k, g_start, m_start, e_start;
	uint32_t idx = *pos;
	void *e_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;

	if (!tbl)
		return NULL;
	g_tbl = tbl->tbl;
	if (!g_tbl)
		return NULL;
	g_start = (idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK;
	m_start = (idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK;
	e_start = idx & MLX5_L3T_ET_MASK;
	for (i = g_start; i < MLX5_L3T_GT_SIZE; i++) {
		m_tbl = g_tbl->tbl[i];
		if (!m_tbl) {
			/* Jump to new table, reset the sub table start. */
			m_start = 0;
			e_start = 0;
			continue;
		}
		for (j = m_start; j < MLX5_L3T_MT_SIZE; j++) {
			if (!m_tbl->tbl[j]) {
				/*
				 * Jump to new table, reset the sub table
				 * start.
				 */
				e_start = 0;
				continue;
			}
			e_tbl = m_tbl->tbl[j];
			switch (tbl->type) {
			case MLX5_L3T_TYPE_WORD:
				w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!w_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return (void *)&w_e_tbl->entry[k].data;
				}
				break;
			case MLX5_L3T_TYPE_DWORD:
				dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!dw_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return (void *)&dw_e_tbl->entry[k].data;
				}
				break;
			case MLX5_L3T_TYPE_QWORD:
				qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!qw_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return (void *)&qw_e_tbl->entry[k].data;
				}
				break;
			default:
				ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!ptr_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return ptr_e_tbl->entry[k].data;
				}
				break;
			}
		}
	}
	return NULL;
}

/*
 * Macros for a linked list based on indexed memory.
 * Example data structure:
 * struct Foo {
 *	ILIST_ENTRY(uint16_t) next;
 *	...
 * }
 */
#define ILIST_ENTRY(type)						\
struct {								\
	type prev; /* Index of previous element. */			\
	type next; /* Index of next element. */				\
}

#define ILIST_INSERT(pool, head, idx, elem, field)			\
	do {								\
		typeof(elem) peer;					\
		MLX5_ASSERT((elem) && (idx));				\
		(elem)->field.next = *(head);				\
		(elem)->field.prev = 0;					\
		if (*(head)) {						\
			(peer) = mlx5_ipool_get(pool, *(head));		\
			if (peer)					\
				(peer)->field.prev = (idx);		\
		}							\
		*(head) = (idx);					\
	} while (0)

#define ILIST_REMOVE(pool, head, idx, elem, field)			\
	do {								\
		typeof(elem) peer;					\
		MLX5_ASSERT(elem);					\
		MLX5_ASSERT(head);					\
		if ((elem)->field.prev) {				\
			(peer) = mlx5_ipool_get				\
				 (pool, (elem)->field.prev);		\
			if (peer)					\
				(peer)->field.next = (elem)->field.next;\
		}							\
		if ((elem)->field.next) {				\
			(peer) = mlx5_ipool_get				\
				 (pool, (elem)->field.next);		\
			if (peer)					\
				(peer)->field.prev = (elem)->field.prev;\
		}							\
		if (*(head) == (idx))					\
			*(head) = (elem)->field.next;			\
	} while (0)

#define ILIST_FOREACH(pool, head, idx, elem, field)			\
	for ((idx) = (head), (elem) =					\
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem);	\
	     idx = (elem)->field.next, (elem) =				\
	     (idx) ? mlx5_ipool_get(pool, idx) : NULL)
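
/*
 * A minimal usage sketch for the indexed list macros (struct Foo,
 * foo_pool and foo_head are hypothetical):
 *
 *	struct Foo {
 *		ILIST_ENTRY(uint32_t) link;
 *		uint32_t value;
 *	};
 *	uint32_t foo_head = 0, idx;
 *	struct Foo *foo = mlx5_ipool_zmalloc(foo_pool, &idx);
 *	if (foo)
 *		ILIST_INSERT(foo_pool, &foo_head, idx, foo, link);
 *	ILIST_FOREACH(foo_pool, foo_head, idx, foo, link)
 *		foo->value++;
 */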

/* Single index list. */
#define SILIST_ENTRY(type)						\
struct {								\
	type next; /* Index of next element. */				\
}

#define SILIST_INSERT(head, idx, elem, field)				\
	do {								\
		MLX5_ASSERT((elem) && (idx));				\
		(elem)->field.next = *(head);				\
		*(head) = (idx);					\
	} while (0)

#define SILIST_FOREACH(pool, head, idx, elem, field)			\
	for ((idx) = (head), (elem) =					\
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem);	\
	     idx = (elem)->field.next, (elem) =				\
	     (idx) ? mlx5_ipool_get(pool, idx) : NULL)

#define MLX5_L3T_FOREACH(tbl, idx, entry)				\
	for (idx = 0, (entry) = mlx5_l3t_get_next((tbl), &idx);		\
	     (entry);							\
	     idx++, (entry) = mlx5_l3t_get_next((tbl), &idx))

#define MLX5_IPOOL_FOREACH(ipool, idx, entry)				\
	for ((idx) = 0, mlx5_ipool_flush_cache((ipool)),		\
	    (entry) = mlx5_ipool_get_next((ipool), &idx);		\
	    (entry); idx++, (entry) = mlx5_ipool_get_next((ipool), &idx))

#endif /* RTE_PMD_MLX5_UTILS_H_ */