/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_UTILS_H_
#define RTE_PMD_MLX5_UTILS_H_

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <limits.h>
#include <errno.h>

#include <rte_spinlock.h>
#include <rte_rwlock.h>
#include <rte_memory.h>
#include <rte_bitmap.h>

#include <mlx5_common.h>

#include "mlx5_defs.h"

/* Convert a bit number to the corresponding 64-bit mask. */
#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))

/* Save and restore errno around argument evaluation. */
#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))
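
/*
 * Usage sketch (illustrative only, not part of this header): ERRNO_SAFE()
 * evaluates its argument while keeping the caller's errno intact, which is
 * handy when cleanup calls on an error path must not clobber the error
 * being reported. The "fd" and "ret" variables below are hypothetical.
 *
 *	int ret = -errno;		// errno describes the failure
 *	ERRNO_SAFE(close(fd));		// close() cannot overwrite errno
 *	return ret;
 */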

extern int mlx5_logtype;

/* Generic printf()-like logging macro with automatic line feed. */
#define DRV_LOG(level, ...) \
	PMD_DRV_LOG_(level, mlx5_logtype, MLX5_DRIVER_NAME, \
		__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
		PMD_DRV_LOG_CPAREN)

/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
#define DATA_LEN(m) ((m)->data_len)
#define PKT_LEN(m) ((m)->pkt_len)
#define DATA_OFF(m) ((m)->data_off)
#define SET_DATA_OFF(m, o) ((m)->data_off = (o))
#define NB_SEGS(m) ((m)->nb_segs)
#define PORT(m) ((m)->port)

/* Transpose flags. Useful to convert IBV to DPDK flags. */
#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))
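
/*
 * Example (illustrative only, the flag names are made up): move a flag
 * from bit 4 of a source flag set to bit 1 of a destination flag set.
 *
 *	#define SRC_FLAG_FOO 0x10	// bit 4 in the source flag set
 *	#define DST_FLAG_FOO 0x02	// bit 1 in the destination flag set
 *
 *	uint32_t dst = TRANSPOSE(src, SRC_FLAG_FOO, DST_FLAG_FOO);
 *	// dst == DST_FLAG_FOO when SRC_FLAG_FOO is set in src, 0 otherwise.
 */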

/*
 * For data linked with a sequentially increasing index, an array table is
 * more efficient than a hash table when a single entry has to be looked up
 * among a large number of entries. Traditional hash tables have a fixed
 * table size, so saving a huge number of entries to the hash table also
 * causes many hash collisions.
 *
 * A plain array table has a fixed size as well, and allocating all the
 * needed memory at once wastes a lot of it. When the exact number of
 * entries is unknown, sizing such an array up front is impossible.
 *
 * A multi-level table balances both disadvantages. A global high-level
 * table holding sub-table entries is allocated first, and each sub-table
 * is allocated only once an index belonging to it needs to be saved.
 * E.g. for up to 32-bit indices with a 10-10-12 three-level split and a
 * sequentially increasing index, memory grows by one 4K-entry sub-table
 * at a time.
 *
 * The current implementation provides such a 10-10-12 32-bit split
 * Three-Level table for cases with millions of entries to save. The index
 * entries are addressed directly by the index, no search is needed.
 */

/* L3 table global table define. */
#define MLX5_L3T_GT_OFFSET 22
#define MLX5_L3T_GT_SIZE (1 << 10)
#define MLX5_L3T_GT_MASK (MLX5_L3T_GT_SIZE - 1)

/* L3 table middle table define. */
#define MLX5_L3T_MT_OFFSET 12
#define MLX5_L3T_MT_SIZE (1 << 10)
#define MLX5_L3T_MT_MASK (MLX5_L3T_MT_SIZE - 1)

/* L3 table entry table define. */
#define MLX5_L3T_ET_OFFSET 0
#define MLX5_L3T_ET_SIZE (1 << 12)
#define MLX5_L3T_ET_MASK (MLX5_L3T_ET_SIZE - 1)
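
/*
 * Illustrative decomposition (a sketch of how these macros are expected to
 * be used, not an API): a 32-bit index splits into the three levels with
 * the offsets and masks above.
 *
 *	uint32_t gt_i = (idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK;
 *	uint32_t mt_i = (idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK;
 *	uint32_t et_i = (idx >> MLX5_L3T_ET_OFFSET) & MLX5_L3T_ET_MASK;
 *	// gt_i selects the sub-table in the global table (10 bits),
 *	// mt_i selects the entry table within that sub-table (10 bits),
 *	// et_i selects the entry inside the entry table (12 bits).
 */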

/* L3 table type. */
enum mlx5_l3t_type {
	MLX5_L3T_TYPE_WORD = 0,
	MLX5_L3T_TYPE_DWORD,
	MLX5_L3T_TYPE_QWORD,
	MLX5_L3T_TYPE_PTR,
	MLX5_L3T_TYPE_MAX,
};

struct mlx5_indexed_pool;

/* Generic data struct. */
union mlx5_l3t_data {
	uint16_t word;
	uint32_t dword;
	uint64_t qword;
	void *ptr;
};

/* L3 level table data structure. */
struct mlx5_l3t_level_tbl {
	uint64_t ref_cnt; /* Table ref_cnt. */
	void *tbl[]; /* Table array. */
};

/* L3 word entry table data structure. */
struct mlx5_l3t_entry_word {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint16_t data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 double word entry table data structure. */
struct mlx5_l3t_entry_dword {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint32_t data;
		int32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 quad word entry table data structure. */
struct mlx5_l3t_entry_qword {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint64_t data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 pointer entry table data structure. */
struct mlx5_l3t_entry_ptr {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		void *data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 table data structure. */
struct mlx5_l3t_tbl {
	enum mlx5_l3t_type type; /* Table type. */
	struct mlx5_indexed_pool *eip;
	/* Table index pool handles. */
	struct mlx5_l3t_level_tbl *tbl; /* Global table index. */
	rte_spinlock_t sl; /* The table lock. */
};

/** Type of callback function used to allocate new L3 table entry data. */
typedef int32_t (*mlx5_l3t_alloc_callback_fn)(void *ctx,
					   union mlx5_l3t_data *data);

/*
 * The index of an indexed memory entry is made up of the trunk index and
 * the offset of the entry within the trunk. Since the entry index is 32
 * bits, a user who prefers small trunks can raise the macro below so that
 * the pool can hold more trunks and therefore more allocated entries.
 */
#define TRUNK_IDX_BITS 16
#define TRUNK_MAX_IDX ((1 << TRUNK_IDX_BITS) - 1)
#define TRUNK_INVALID TRUNK_MAX_IDX
#define MLX5_IPOOL_DEFAULT_TRUNK_SIZE (1 << (28 - TRUNK_IDX_BITS))
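
/*
 * Worked example with the defaults above (informative): TRUNK_IDX_BITS is
 * 16, so a pool can reference up to TRUNK_MAX_IDX = 65535 trunks, and the
 * default trunk holds MLX5_IPOOL_DEFAULT_TRUNK_SIZE = 1 << (28 - 16) =
 * 4096 entries. Raising TRUNK_IDX_BITS trades smaller trunks for a larger
 * trunk count, e.g. a hypothetical value of 20 would allow 2^20 - 1 trunks
 * of 256 default entries each.
 */
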
#ifdef RTE_LIBRTE_MLX5_DEBUG
#define POOL_DEBUG 1
#endif

struct mlx5_indexed_pool_config {
	uint32_t size; /* Pool entry size. */
	uint32_t trunk_size:22;
	/*
	 * Trunk entry number. Must be a power of 2. It can be increased
	 * if trunk growing is enabled. The trunk entry number grows by
	 * left-shifting with grow_shift. Trunks with an index beyond
	 * grow_trunk keep the same entry number as the last grow trunk.
	 */
	uint32_t grow_trunk:4;
	/*
	 * Number of trunks with a growing entry number in the pool. Set
	 * it to 0 to make the pool work as a fixed trunk size pool. It
	 * works only if grow_shift is not 0.
	 */
	uint32_t grow_shift:4;
	/*
	 * Shift value by which the trunk entry number grows, stops after
	 * grow_trunk. It works only if grow_trunk is not 0.
	 */
	uint32_t need_lock:1;
	/* Lock is needed for multiple thread usage. */
	uint32_t release_mem_en:1; /* Release trunk memory when it is free. */
	const char *type; /* Memory allocate type name. */
	void *(*malloc)(uint32_t flags, size_t size, unsigned int align,
			int socket);
	/* User defined memory allocator. */
	void (*free)(void *addr); /* User defined memory release. */
};
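
/*
 * Illustrative configuration sketch (the values are made up, not driver
 * defaults): a lockable pool of 64-byte entries starting with 4096-entry
 * trunks, where the first 3 trunks grow their entry number by a shift of 1.
 *
 *	struct mlx5_indexed_pool_config cfg = {
 *		.size = 64,		// entry size in bytes
 *		.trunk_size = 4096,	// first trunk entry number, power of 2
 *		.grow_trunk = 3,	// number of growing trunks
 *		.grow_shift = 1,	// each grow trunk doubles entry number
 *		.need_lock = 1,		// pool is used from multiple threads
 *		.release_mem_en = 1,	// free trunk memory when it is empty
 *		.type = "example_pool",	// hypothetical allocation type name
 *		.malloc = mlx5_malloc,	// or any user supplied allocator
 *		.free = mlx5_free,
 *	};
 */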

struct mlx5_indexed_trunk {
	uint32_t idx; /* Trunk id. */
	uint32_t prev; /* Previous free trunk in free list. */
	uint32_t next; /* Next free trunk in free list. */
	uint32_t free; /* Free entries available. */
	struct rte_bitmap *bmp;
	uint8_t data[] __rte_cache_aligned; /* Entry data start. */
};

struct mlx5_indexed_pool {
	struct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */
	rte_spinlock_t lock; /* Pool lock for multiple thread usage. */
	uint32_t n_trunk_valid; /* Trunks allocated. */
	uint32_t n_trunk; /* Trunk pointer array size. */
	/* Dim of trunk pointer array. */
	struct mlx5_indexed_trunk **trunks;
	uint32_t free_list; /* Index to first free trunk. */
#ifdef POOL_DEBUG
	uint32_t n_entry;
	uint32_t trunk_new;
	uint32_t trunk_avail;
	uint32_t trunk_empty;
	uint32_t trunk_free;
#endif
	uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */
};

/**
 * Return logarithm of the nearest power of two above input value.
 *
 * @param v
 *   Input value.
 *
 * @return
 *   Logarithm of the nearest power of two above input value.
 */
static inline unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

#define MLX5_HLIST_DIRECT_KEY 0x0001 /* Use the key directly as hash index. */
#define MLX5_HLIST_WRITE_MOST 0x0002 /* List mostly used for append new. */

/** Maximum size of string for naming the hlist table. */
#define MLX5_HLIST_NAMESIZE			32

struct mlx5_hlist;

/**
 * Structure of the entry in the hash list. Users should define their own
 * struct that contains this one in order to store the data. The 'key' is
 * 64 bits right now and it is the user's responsibility to guarantee there
 * is no collision.
 */
struct mlx5_hlist_entry {
	LIST_ENTRY(mlx5_hlist_entry) next; /* Entry pointers in the list. */
	uint64_t key; /* User defined 'key', could be the hash signature. */
	uint32_t ref_cnt; /* Reference count. */
};

/** Structure for hash head. */
LIST_HEAD(mlx5_hlist_head, mlx5_hlist_entry);

/**
 * Type of callback function for entry removal.
 *
 * @param list
 *   The hash list.
 * @param entry
 *   The entry in the list.
 */
typedef void (*mlx5_hlist_remove_cb)(struct mlx5_hlist *list,
				     struct mlx5_hlist_entry *entry);

/**
 * Type of function for user defined matching.
 *
 * @param list
 *   The hash list.
 * @param entry
 *   The entry in the list.
 * @param key
 *   The new entry key.
 * @param ctx
 *   The pointer to new entry context.
 *
 * @return
 *   0 if matching, non-zero number otherwise.
 */
typedef int (*mlx5_hlist_match_cb)(struct mlx5_hlist *list,
				   struct mlx5_hlist_entry *entry,
				   uint64_t key, void *ctx);

/**
 * Type of function for user defined hash list entry creation.
 *
 * @param list
 *   The hash list.
 * @param key
 *   The key of the new entry.
 * @param ctx
 *   The pointer to new entry context.
 *
 * @return
 *   Pointer to allocated entry on success, NULL otherwise.
 */
typedef struct mlx5_hlist_entry *(*mlx5_hlist_create_cb)
				  (struct mlx5_hlist *list,
				   uint64_t key, void *ctx);

/**
 * Hash list table structure.
 *
 * An entry in the hash list can be reused: if the entry already exists,
 * its reference count is increased and the existing entry is returned.
 *
 * When an entry is destroyed, its reference count is decreased and the
 * entry is only freed when no reference to it remains.
 */
struct mlx5_hlist {
	char name[MLX5_HLIST_NAMESIZE]; /**< Name of the hash list. */
	/**< Number of heads, needs to be a power of 2. */
	uint32_t table_sz;
	uint32_t entry_sz; /**< Size of entry, used to allocate entry. */
	/**< Mask to get the index of the list heads. */
	uint32_t mask;
	rte_rwlock_t lock;
	uint32_t gen_cnt; /* List modification will update generation count. */
	bool direct_key; /* Use the new entry key directly as hash index. */
	bool write_most; /* List mostly used for append new or destroy. */
	void *ctx;
	mlx5_hlist_create_cb cb_create; /**< entry create callback. */
	mlx5_hlist_match_cb cb_match; /**< entry match callback. */
	mlx5_hlist_remove_cb cb_remove; /**< entry remove callback. */
	struct mlx5_hlist_head heads[];	/**< list head arrays. */
};

/**
 * Create a hash list table. The user can specify the list heads array size
 * of the table; for now the size should be a power of 2 in order to get a
 * better distribution of the entries. Each entry is part of a whole data
 * element and the caller is responsible for the data element's allocation
 * and cleanup / free. The key of each entry is hashed with CRC in order to
 * generate a slightly fairer distribution.
 *
 * @param name
 *   Name of the hash list (optional).
 * @param size
 *   Heads array size of the hash list.
 * @param entry_size
 *   Entry size to allocate if cb_create is not specified.
 * @param flags
 *   The hash list attribute flags.
 * @param cb_create
 *   Callback function for entry create.
 * @param cb_match
 *   Callback function for entry match.
 * @param cb_destroy
 *   Callback function for entry destroy.
 * @return
 *   Pointer to the hash list table created, NULL on failure.
 */
struct mlx5_hlist *mlx5_hlist_create(const char *name, uint32_t size,
				     uint32_t entry_size, uint32_t flags,
				     mlx5_hlist_create_cb cb_create,
				     mlx5_hlist_match_cb cb_match,
				     mlx5_hlist_remove_cb cb_destroy);

/**
 * Search for an entry matching the key.
 *
 * The result returned might be destroyed by another thread, so this
 * function must only be used in the main thread.
 *
 * @param h
 *   Pointer to the hash list table.
 * @param key
 *   Key for the entry to search.
 * @param ctx
 *   Common context parameter used by entry callback function.
 *
 * @return
 *   Pointer to the hlist entry if found, NULL otherwise.
 */
struct mlx5_hlist_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key,
					   void *ctx);

/**
 * Insert an entry into the hash list table. The entry is only part of a
 * whole data element and a 64-bit key is used for matching. The user
 * should construct the key or give a calculated hash signature and
 * guarantee there is no collision.
 *
 * @param h
 *   Pointer to the hash list table.
 * @param key
 *   Key of the entry to be inserted, or the calculated hash signature.
 * @param ctx
 *   Common context parameter used by callback function.
 *
 * @return
 *   Registered entry on success, NULL otherwise.
 */
struct mlx5_hlist_entry *mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key,
					     void *ctx);

/**
 * Remove an entry from the hash list table. The user should guarantee the
 * validity of the entry.
 *
 * @param h
 *   Pointer to the hash list table. (not used)
 * @param entry
 *   Entry to be removed from the hash list table.
 * @return
 *   0 on entry removed, 1 on entry still referenced.
 */
int mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry);

/**
 * Destroy the hash list table. All the entries already inserted into the
 * lists will be handled by the callback function provided by the user
 * (including free if needed) before the table is freed.
 *
 * @param h
 *   Pointer to the hash list table.
 */
void mlx5_hlist_destroy(struct mlx5_hlist *h);
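
/*
 * Typical usage sketch (illustrative only; the entry struct, callbacks,
 * "key" and "ctx" below are hypothetical):
 *
 *	struct foo_entry {
 *		struct mlx5_hlist_entry hl;	// embedded list entry
 *		uint32_t payload;
 *	};
 *
 *	struct mlx5_hlist *h = mlx5_hlist_create("foo", 64,
 *						 sizeof(struct foo_entry), 0,
 *						 foo_create_cb, foo_match_cb,
 *						 foo_remove_cb);
 *	struct mlx5_hlist_entry *e = mlx5_hlist_register(h, key, ctx);
 *	...
 *	mlx5_hlist_unregister(h, e);	// drops a reference, frees at zero
 *	mlx5_hlist_destroy(h);
 */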

/************************ cache list *****************************/

/** Maximum size of string for naming. */
#define MLX5_NAME_SIZE			32

struct mlx5_cache_list;

/**
 * Structure of the entry in the cache list. Users should define their own
 * struct that contains this one in order to store the data.
 */
struct mlx5_cache_entry {
	LIST_ENTRY(mlx5_cache_entry) next; /* Entry pointers in the list. */
	uint32_t ref_cnt; /* Reference count. */
};

/**
 * Type of callback function for entry removal.
 *
 * @param list
 *   The cache list.
 * @param entry
 *   The entry in the list.
 */
typedef void (*mlx5_cache_remove_cb)(struct mlx5_cache_list *list,
				     struct mlx5_cache_entry *entry);

/**
 * Type of function for user defined matching.
 *
 * @param list
 *   The cache list.
 * @param entry
 *   The entry in the list.
 * @param ctx
 *   The pointer to new entry context.
 *
 * @return
 *   0 if matching, non-zero number otherwise.
 */
typedef int (*mlx5_cache_match_cb)(struct mlx5_cache_list *list,
				   struct mlx5_cache_entry *entry, void *ctx);

/**
 * Type of function for user defined cache list entry creation.
 *
 * @param list
 *   The cache list.
 * @param entry
 *   The newly allocated entry, NULL if the list entry size is unspecified.
 *   In that case the new entry has to be allocated in the callback and
 *   returned.
 * @param ctx
 *   The pointer to new entry context.
 *
 * @return
 *   Pointer to the entry on success, NULL otherwise.
 */
typedef struct mlx5_cache_entry *(*mlx5_cache_create_cb)
				 (struct mlx5_cache_list *list,
				  struct mlx5_cache_entry *entry,
				  void *ctx);

/**
 * Linked cache list structure.
 *
 * An entry in the cache list can be reused: if the entry already exists,
 * its reference count is increased and the existing entry is returned.
 *
 * When an entry is destroyed, its reference count is decreased and the
 * entry is only freed when no reference to it remains.
 *
 * The linked list cache is designed for a limited number of entries,
 * mostly read with few modifications.
 *
 * For a cache with a huge amount of entries, please consider the hash
 * list cache instead.
 */
struct mlx5_cache_list {
	char name[MLX5_NAME_SIZE]; /**< Name of the cache list. */
	uint32_t entry_sz; /**< Entry size, 0: use create callback. */
	rte_rwlock_t lock; /* Read/write lock. */
	uint32_t gen_cnt; /* List modification will update generation count. */
	uint32_t count; /* Number of entries in list. */
	void *ctx; /* User context passed to the callbacks. */
	mlx5_cache_create_cb cb_create; /**< entry create callback. */
	mlx5_cache_match_cb cb_match; /**< entry match callback. */
	mlx5_cache_remove_cb cb_remove; /**< entry remove callback. */
	LIST_HEAD(mlx5_cache_head, mlx5_cache_entry) head;
};

/**
 * Initialize a cache list.
 *
 * @param list
 *   Pointer to the cache list.
 * @param name
 *   Name of the cache list.
 * @param entry_size
 *   Entry size to allocate, 0 to allocate by creation callback.
 * @param ctx
 *   Pointer to the list context data.
 * @param cb_create
 *   Callback function for entry create.
 * @param cb_match
 *   Callback function for entry match.
 * @param cb_remove
 *   Callback function for entry remove.
 * @return
 *   0 on success, otherwise failure.
 */
int mlx5_cache_list_init(struct mlx5_cache_list *list,
			 const char *name, uint32_t entry_size, void *ctx,
			 mlx5_cache_create_cb cb_create,
			 mlx5_cache_match_cb cb_match,
			 mlx5_cache_remove_cb cb_remove);

/**
 * Search for an entry matching the given context.
 *
 * The result returned might be destroyed by another thread, so this
 * function must only be used in the main thread.
 *
 * @param list
 *   Pointer to the cache list.
 * @param ctx
 *   Common context parameter used by entry callback function.
 *
 * @return
 *   Pointer to the cache entry if found, NULL otherwise.
 */
struct mlx5_cache_entry *mlx5_cache_lookup(struct mlx5_cache_list *list,
					   void *ctx);

/**
 * Reuse or create an entry in the cache list.
 *
 * @param list
 *   Pointer to the cache list.
 * @param ctx
 *   Common context parameter used by callback function.
 *
 * @return
 *   Registered entry on success, NULL otherwise.
 */
struct mlx5_cache_entry *mlx5_cache_register(struct mlx5_cache_list *list,
					     void *ctx);

/**
 * Remove an entry from the cache list.
 *
 * The user should guarantee the validity of the entry.
 *
 * @param list
 *   Pointer to the cache list.
 * @param entry
 *   Entry to be removed from the cache list table.
 * @return
 *   0 on entry removed, 1 on entry still referenced.
 */
int mlx5_cache_unregister(struct mlx5_cache_list *list,
			  struct mlx5_cache_entry *entry);

/**
 * Destroy the cache list.
 *
 * @param list
 *   Pointer to the cache list.
 */
void mlx5_cache_list_destroy(struct mlx5_cache_list *list);

/**
 * Get the entry number from the cache list.
 *
 * @param list
 *   Pointer to the cache list.
 * @return
 *   Cache list entry number.
 */
uint32_t
mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list);
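
/*
 * Typical usage sketch (illustrative only; the callbacks, entry struct
 * and "ctx" below are hypothetical):
 *
 *	struct mlx5_cache_list list;
 *
 *	mlx5_cache_list_init(&list, "bar", sizeof(struct bar_entry), ctx,
 *			     bar_create_cb, bar_match_cb, bar_remove_cb);
 *	struct mlx5_cache_entry *e = mlx5_cache_register(&list, ctx);
 *	...
 *	mlx5_cache_unregister(&list, e);	// drops a reference
 *	mlx5_cache_list_destroy(&list);
 */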

/********************************* indexed pool *************************/

/**
 * This function allocates a non-initialized memory entry from the pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * The memory entry is allocated from a memory trunk, with no alignment.
 *
 * @param pool
 *   Pointer to the indexed memory entry pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save the allocated index.
 *   The memory index is always a positive value.
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx);

/**
 * This function allocates a zero-initialized memory entry from the pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * The memory entry is allocated from a memory trunk, with no alignment.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save the allocated index.
 *   The memory index is always a positive value.
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx);

/**
 * This function frees an indexed memory entry back to the pool.
 * The caller has to make sure that the index was allocated from the
 * same pool.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 * @param idx
 *   Allocated memory entry index.
 */
void mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx);

/**
 * This function returns the pointer to an indexed memory entry from its
 * index. The caller has to make sure that the index is valid and was
 * allocated from the same pool.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 * @param idx
 *   Allocated memory index.
 * @return
 *   - Pointer to the indexed memory entry.
 */
void *mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx);

/**
 * This function creates an indexed memory pool.
 * The caller has to fill in the configuration accordingly.
 *
 * @param cfg
 *   Pointer to the indexed memory pool configuration.
 * @return
 *   - Pointer to the created indexed memory pool.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg);

/**
 * This function releases all resources of the pool.
 * The caller has to make sure that none of the indexes and memory
 * allocated from this pool are referenced anymore.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 * @return
 *   - non-zero value on error.
 *   - 0 on success.
 */
int mlx5_ipool_destroy(struct mlx5_indexed_pool *pool);

/**
 * This function dumps debug info of pool.
 *
 * @param pool
 *   Pointer to indexed memory pool.
 */
void mlx5_ipool_dump(struct mlx5_indexed_pool *pool);
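
/*
 * Usage sketch (illustrative only; "cfg" is a configuration such as the
 * one sketched above and "struct foo" is a hypothetical entry type with
 * cfg.size == sizeof(struct foo)):
 *
 *	struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);
 *	uint32_t idx;
 *	struct foo *f = mlx5_ipool_zmalloc(pool, &idx);
 *	...
 *	f = mlx5_ipool_get(pool, idx);	// translate the index back to a pointer
 *	mlx5_ipool_free(pool, idx);
 *	mlx5_ipool_destroy(pool);
 */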

/**
 * This function allocates a new empty Three-level table.
 *
 * @param type
 *   The l3t type, which can be word, double word, quad word or pointer,
 *   addressed by index.
 *
 * @return
 *   - Pointer to the allocated l3t.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
struct mlx5_l3t_tbl *mlx5_l3t_create(enum mlx5_l3t_type type);

/**
 * This function destroys the Three-level table.
 *
 * @param tbl
 *   Pointer to the l3t.
 */
void mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl);

/**
 * This function gets the index entry from the Three-level table.
 *
 * @param tbl
 *   Pointer to the l3t.
 * @param idx
 *   Index to the entry.
 * @param data
 *   Pointer to the memory which saves the entry data.
 *   When the function returns 0, data contains the entry data retrieved
 *   from the l3t.
 *   When the function returns -1, data is not modified.
 *
 * @return
 *   0 if success, -1 on error.
 */
int32_t mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
			    union mlx5_l3t_data *data);

/**
 * This function gets the index entry from the Three-level table.
 *
 * If the index entry is not available, a new one is allocated by the
 * callback function and filled into the entry.
 *
 * @param tbl
 *   Pointer to the l3t.
 * @param idx
 *   Index to the entry.
 * @param data
 *   Pointer to the memory which saves the entry data.
 *   When the function returns 0, data contains the entry data retrieved
 *   from the l3t.
 *   When the function returns -1, data is not modified.
 * @param cb
 *   Callback function to allocate new data.
 * @param ctx
 *   Context for callback function.
 *
 * @return
 *   0 if success, -1 on error.
 */
int32_t mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
			       union mlx5_l3t_data *data,
			       mlx5_l3t_alloc_callback_fn cb, void *ctx);

/**
 * This function decreases the reference count of an index entry in the
 * Three-level table and clears the entry when the count reaches 0.
 *
 * @param tbl
 *   Pointer to the l3t.
 * @param idx
 *   Index to the entry.
 *
 * @return
 *   The remaining reference count, 0 when the entry is cleared, -1 on
 *   error.
 */
int32_t mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx);

/**
 * This function sets the index entry in the Three-level table.
 * If the entry is already set, the EEXIST errno is given and the already
 * set data is filled into data.
 *
 * @param[in] tbl
 *   Pointer to the l3t.
 * @param[in] idx
 *   Index to the entry.
 * @param[in, out] data
 *   Pointer to the memory which contains the entry data to save to the
 *   l3t. If the entry is already set, the existing data is filled in.
 *
 * @return
 *   0 if success, -1 on error.
 */
int32_t mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
			    union mlx5_l3t_data *data);
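
/*
 * Usage sketch (illustrative only; "idx" and error handling are elided):
 *
 *	union mlx5_l3t_data data = { .dword = 0xdeadbeef };
 *	struct mlx5_l3t_tbl *tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
 *
 *	mlx5_l3t_set_entry(tbl, idx, &data);	// store data at idx
 *	mlx5_l3t_get_entry(tbl, idx, &data);	// read it back
 *	mlx5_l3t_clear_entry(tbl, idx);		// drop the reference
 *	mlx5_l3t_destroy(tbl);
 */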

/*
 * Macros for linked list based on indexed memory.
 * Example data structure:
 * struct Foo {
 *	ILIST_ENTRY(uint16_t) next;
 *	...
 * }
 *
 */
#define ILIST_ENTRY(type)						\
struct {								\
	type prev; /* Index of previous element. */			\
	type next; /* Index of next element. */				\
}

#define ILIST_INSERT(pool, head, idx, elem, field)			\
	do {								\
		typeof(elem) peer;					\
		MLX5_ASSERT((elem) && (idx));				\
		(elem)->field.next = *(head);				\
		(elem)->field.prev = 0;					\
		if (*(head)) {						\
			(peer) = mlx5_ipool_get(pool, *(head));		\
			if (peer)					\
				(peer)->field.prev = (idx);		\
		}							\
		*(head) = (idx);					\
	} while (0)

#define ILIST_REMOVE(pool, head, idx, elem, field)			\
	do {								\
		typeof(elem) peer;					\
		MLX5_ASSERT(elem);					\
		MLX5_ASSERT(head);					\
		if ((elem)->field.prev) {				\
			(peer) = mlx5_ipool_get				\
				 (pool, (elem)->field.prev);		\
			if (peer)					\
				(peer)->field.next = (elem)->field.next;\
		}							\
		if ((elem)->field.next) {				\
			(peer) = mlx5_ipool_get				\
				 (pool, (elem)->field.next);		\
			if (peer)					\
				(peer)->field.prev = (elem)->field.prev;\
		}							\
		if (*(head) == (idx))					\
			*(head) = (elem)->field.next;			\
	} while (0)

#define ILIST_FOREACH(pool, head, idx, elem, field)			\
	for ((idx) = (head), (elem) =					\
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem);	\
	     idx = (elem)->field.next, (elem) =				\
	     (idx) ? mlx5_ipool_get(pool, idx) : NULL)
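
/*
 * Usage sketch for the "struct Foo" example above (illustrative only;
 * "pool" is assumed to be an indexed pool whose entry size matches
 * struct Foo, and a head index of 0 means an empty list):
 *
 *	uint16_t head = 0;
 *	uint32_t idx;
 *	struct Foo *elem = mlx5_ipool_zmalloc(pool, &idx);
 *
 *	ILIST_INSERT(pool, &head, (uint16_t)idx, elem, next);
 *	ILIST_FOREACH(pool, head, idx, elem, next) {
 *		// visit each element through "elem" and "idx"
 *	}
 */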

/* Single index list. */
#define SILIST_ENTRY(type)						\
struct {								\
	type next; /* Index of next element. */				\
}

#define SILIST_INSERT(head, idx, elem, field)				\
	do {								\
		MLX5_ASSERT((elem) && (idx));				\
		(elem)->field.next = *(head);				\
		*(head) = (idx);					\
	} while (0)

#define SILIST_FOREACH(pool, head, idx, elem, field)			\
	for ((idx) = (head), (elem) =					\
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem);	\
	     idx = (elem)->field.next, (elem) =				\
	     (idx) ? mlx5_ipool_get(pool, idx) : NULL)

#endif /* RTE_PMD_MLX5_UTILS_H_ */