/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_hash_crc.h>

#include <mlx5_malloc.h>

#include "mlx5_utils.h"

/********************* Hash List **********************/

static struct mlx5_hlist_entry *
mlx5_hlist_default_create_cb(struct mlx5_hlist *h, uint64_t key __rte_unused,
			     void *ctx __rte_unused)
{
	return mlx5_malloc(MLX5_MEM_ZERO, h->entry_sz, 0, SOCKET_ID_ANY);
}

static void
mlx5_hlist_default_remove_cb(struct mlx5_hlist *h __rte_unused,
			     struct mlx5_hlist_entry *entry)
{
	mlx5_free(entry);
}

static int
mlx5_hlist_default_match_cb(struct mlx5_hlist *h __rte_unused,
			    struct mlx5_hlist_entry *entry,
			    uint64_t key, void *ctx __rte_unused)
{
	return entry->key != key;
}

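/*
 * A minimal usage sketch of the hash list API, assuming a caller-defined
 * entry type that embeds struct mlx5_hlist_entry as its first member (the
 * names below are illustrative only and are not used by the driver):
 *
 *	struct my_entry {
 *		struct mlx5_hlist_entry hentry;
 *		uint32_t payload;
 *	};
 *
 *	struct mlx5_hlist *h;
 *	struct mlx5_hlist_entry *e;
 *
 *	h = mlx5_hlist_create("example", 64, sizeof(struct my_entry), 0,
 *			      NULL, NULL, NULL); // NULL selects the defaults
 *	e = mlx5_hlist_register(h, 0x1234, NULL); // create or reuse by key
 *	e = mlx5_hlist_lookup(h, 0x1234, NULL);   // lookup only, no reference
 *	mlx5_hlist_unregister(h, e);              // drop one reference
 *	mlx5_hlist_destroy(h);
 */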
struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size,
		  uint32_t flags, mlx5_hlist_create_cb cb_create,
		  mlx5_hlist_match_cb cb_match, mlx5_hlist_remove_cb cb_remove)
{
	struct mlx5_hlist *h;
	uint32_t act_size;
	uint32_t alloc_size;

	if (!size || (!cb_create ^ !cb_remove))
		return NULL;
	/* Align to the next power of 2; a 32-bit integer is enough now. */
	if (!rte_is_power_of_2(size)) {
		act_size = rte_align32pow2(size);
		DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will "
			"be aligned to 0x%" PRIX32 ".", size, act_size);
	} else {
		act_size = size;
	}
	alloc_size = sizeof(struct mlx5_hlist) +
		     sizeof(struct mlx5_hlist_head) * act_size;
	/* Zeroed allocation, so no need to initialize the heads. */
	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
			SOCKET_ID_ANY);
	if (!h) {
		DRV_LOG(ERR, "No memory for hash list %s creation",
			name ? name : "None");
		return NULL;
	}
	if (name)
		snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name);
	h->table_sz = act_size;
	h->mask = act_size - 1;
	h->entry_sz = entry_size;
	h->direct_key = !!(flags & MLX5_HLIST_DIRECT_KEY);
	h->write_most = !!(flags & MLX5_HLIST_WRITE_MOST);
	h->cb_create = cb_create ? cb_create : mlx5_hlist_default_create_cb;
	h->cb_match = cb_match ? cb_match : mlx5_hlist_default_match_cb;
	h->cb_remove = cb_remove ? cb_remove : mlx5_hlist_default_remove_cb;
	rte_rwlock_init(&h->lock);
	DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.",
		h->name, act_size);
	return h;
}

static struct mlx5_hlist_entry *
__hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx, bool reuse)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h);
	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	first = &h->heads[idx];
	LIST_FOREACH(node, first, next) {
		if (!h->cb_match(h, node, key, ctx)) {
			if (reuse) {
				__atomic_add_fetch(&node->ref_cnt, 1,
						   __ATOMIC_RELAXED);
				DRV_LOG(DEBUG, "Hash list %s entry %p "
					"reuse: %u.",
					h->name, (void *)node, node->ref_cnt);
			}
			break;
		}
	}
	return node;
}

static struct mlx5_hlist_entry *
hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx, bool reuse)
{
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h);
	rte_rwlock_read_lock(&h->lock);
	node = __hlist_lookup(h, key, ctx, reuse);
	rte_rwlock_read_unlock(&h->lock);
	return node;
}

struct mlx5_hlist_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	return hlist_lookup(h, key, ctx, false);
}

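/*
 * Register (create or reuse) the entry for the given key. For lists that are
 * not write-most, a read-locked lookup is tried first and the generation
 * counter is sampled; the write lock is taken only to insert, and the lookup
 * is repeated under the write lock if the list changed in between. The
 * returned entry holds one more reference.
 */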
struct mlx5_hlist_entry *
mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *entry;
	uint32_t prev_gen_cnt = 0;

	MLX5_ASSERT(h);
	/* Use write lock directly for write-most list. */
	if (!h->write_most) {
		prev_gen_cnt = __atomic_load_n(&h->gen_cnt, __ATOMIC_ACQUIRE);
		entry = hlist_lookup(h, key, ctx, true);
		if (entry)
			return entry;
	}
	rte_rwlock_write_lock(&h->lock);
	/* Check if the list changed by other threads. */
	if (h->write_most ||
	    prev_gen_cnt != __atomic_load_n(&h->gen_cnt, __ATOMIC_ACQUIRE)) {
		entry = __hlist_lookup(h, key, ctx, true);
		if (entry)
			goto done;
	}
	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	first = &h->heads[idx];
	entry = h->cb_create(h, key, ctx);
	if (!entry) {
		rte_errno = ENOMEM;
		DRV_LOG(ERR, "Can't allocate hash list %s entry.", h->name);
		goto done;
	}
	entry->key = key;
	entry->ref_cnt = 1;
	LIST_INSERT_HEAD(first, entry, next);
	__atomic_add_fetch(&h->gen_cnt, 1, __ATOMIC_ACQ_REL);
	DRV_LOG(DEBUG, "Hash list %s entry %p new: %u.",
		h->name, (void *)entry, entry->ref_cnt);
done:
	rte_rwlock_write_unlock(&h->lock);
	return entry;
}

struct mlx5_hlist_entry *
mlx5_hlist_lookup_ex(struct mlx5_hlist *h, uint64_t key,
		     mlx5_hlist_match_callback_fn cb, void *ctx)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h && cb && ctx);
	idx = rte_hash_crc_8byte(key, 0) & h->mask;
	first = &h->heads[idx];
	LIST_FOREACH(node, first, next) {
		if (!cb(node, ctx))
			return node;
	}
	return NULL;
}

int
mlx5_hlist_insert_ex(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry,
		     mlx5_hlist_match_callback_fn cb, void *ctx)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h && entry && cb && ctx);
	idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
	first = &h->heads[idx];
	/* No need to reuse the lookup function. */
	LIST_FOREACH(node, first, next) {
		if (!cb(node, ctx))
			return -EEXIST;
	}
	LIST_INSERT_HEAD(first, entry, next);
	return 0;
}

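/*
 * Release one reference on an entry. Returns 1 while other references remain;
 * returns 0 after the last reference is dropped, in which case the entry is
 * unlinked and handed to the remove callback to be freed.
 */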
int
mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
{
	rte_rwlock_write_lock(&h->lock);
	MLX5_ASSERT(entry && entry->ref_cnt && entry->next.le_prev);
	DRV_LOG(DEBUG, "Hash list %s entry %p deref: %u.",
		h->name, (void *)entry, entry->ref_cnt);
	if (--entry->ref_cnt) {
		rte_rwlock_write_unlock(&h->lock);
		return 1;
	}
	LIST_REMOVE(entry, next);
	/* Set to NULL to avoid removing the entry more than once. */
	entry->next.le_prev = NULL;
	h->cb_remove(h, entry);
	rte_rwlock_write_unlock(&h->lock);
	DRV_LOG(DEBUG, "Hash list %s entry %p removed.",
		h->name, (void *)entry);
	return 0;
}

void
mlx5_hlist_destroy(struct mlx5_hlist *h)
{
	uint32_t idx;
	struct mlx5_hlist_entry *entry;

	MLX5_ASSERT(h);
	for (idx = 0; idx < h->table_sz; ++idx) {
		/* No LIST_FOREACH_SAFE, using while instead. */
		while (!LIST_EMPTY(&h->heads[idx])) {
			entry = LIST_FIRST(&h->heads[idx]);
			LIST_REMOVE(entry, next);
			/*
			 * The user owns the whole element that contains the
			 * data entry, so the user is responsible for cleaning
			 * it up and freeing it, because the hlist entry may
			 * not be located at the beginning of the element
			 * (although placing it there is recommended).
			 * Otherwise the default remove callback frees it.
			 */
			h->cb_remove(h, entry);
		}
	}
	mlx5_free(h);
}

/********************* Cache list ************************/

static struct mlx5_cache_entry *
mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
			     struct mlx5_cache_entry *entry __rte_unused,
			     void *ctx __rte_unused)
{
	return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
}

static void
mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
			     struct mlx5_cache_entry *entry)
{
	mlx5_free(entry);
}

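/*
 * A minimal usage sketch of the cache list API, assuming the callback
 * typedefs in mlx5_utils.h match the calls made in this file. The entry
 * type, callbacks and tag below are illustrative only; the match callback is
 * mandatory and must return 0 when the entry matches the lookup context:
 *
 *	struct my_centry {
 *		struct mlx5_cache_entry centry; // must be the first member
 *		uint32_t tag;
 *	};
 *
 *	static struct mlx5_cache_entry *
 *	my_create_cb(struct mlx5_cache_list *list,
 *		     struct mlx5_cache_entry *entry __rte_unused, void *ctx)
 *	{
 *		struct my_centry *e;
 *
 *		e = mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0,
 *				SOCKET_ID_ANY);
 *		if (!e)
 *			return NULL;
 *		e->tag = *(uint32_t *)ctx;
 *		return &e->centry;
 *	}
 *
 *	static int
 *	my_match_cb(struct mlx5_cache_list *list __rte_unused,
 *		    struct mlx5_cache_entry *entry, void *ctx)
 *	{
 *		struct my_centry *e = (struct my_centry *)entry;
 *
 *		return e->tag != *(uint32_t *)ctx; // 0 means "match"
 *	}
 *
 *	static void
 *	my_remove_cb(struct mlx5_cache_list *list __rte_unused,
 *		     struct mlx5_cache_entry *entry)
 *	{
 *		mlx5_free(entry); // valid since centry is the first member
 *	}
 *
 *	struct mlx5_cache_list list;
 *	uint32_t tag = 7;
 *	struct mlx5_cache_entry *e;
 *
 *	mlx5_cache_list_init(&list, "example", sizeof(struct my_centry), NULL,
 *			     my_create_cb, my_match_cb, my_remove_cb);
 *	e = mlx5_cache_register(&list, &tag); // create or reuse, takes a ref
 *	mlx5_cache_unregister(&list, e);      // drop one reference
 *	mlx5_cache_list_destroy(&list);
 */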
int
mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
		     uint32_t entry_size, void *ctx,
		     mlx5_cache_create_cb cb_create,
		     mlx5_cache_match_cb cb_match,
		     mlx5_cache_remove_cb cb_remove)
{
	MLX5_ASSERT(list);
	if (!cb_match || (!cb_create ^ !cb_remove))
		return -1;
	if (name)
		snprintf(list->name, sizeof(list->name), "%s", name);
	list->entry_sz = entry_size;
	list->ctx = ctx;
	list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
	list->cb_match = cb_match;
	list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
	rte_rwlock_init(&list->lock);
	DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
	LIST_INIT(&list->head);
	return 0;
}

static struct mlx5_cache_entry *
__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
{
	struct mlx5_cache_entry *entry;

	LIST_FOREACH(entry, &list->head, next) {
		if (list->cb_match(list, entry, ctx))
			continue;
		if (reuse) {
			__atomic_add_fetch(&entry->ref_cnt, 1,
					   __ATOMIC_RELAXED);
			DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
				list->name, (void *)entry, entry->ref_cnt);
		}
		break;
	}
	return entry;
}

static struct mlx5_cache_entry *
cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
{
	struct mlx5_cache_entry *entry;

	rte_rwlock_read_lock(&list->lock);
	entry = __cache_lookup(list, ctx, reuse);
	rte_rwlock_read_unlock(&list->lock);
	return entry;
}

struct mlx5_cache_entry *
mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
{
	return cache_lookup(list, ctx, false);
}

struct mlx5_cache_entry *
mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
{
	struct mlx5_cache_entry *entry;
	uint32_t prev_gen_cnt = 0;

	MLX5_ASSERT(list);
	prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
	/* Lookup with read lock, reuse if found. */
	entry = cache_lookup(list, ctx, true);
	if (entry)
		return entry;
	/* Not found, append with write lock - block read from other threads. */
	rte_rwlock_write_lock(&list->lock);
	/* If list changed by other threads before lock, search again. */
	if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
		/* Lookup and reuse w/o read lock. */
		entry = __cache_lookup(list, ctx, true);
		if (entry)
			goto done;
	}
	entry = list->cb_create(list, entry, ctx);
	if (!entry) {
		DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
			list->name, (void *)entry);
		goto done;
	}
	entry->ref_cnt = 1;
	LIST_INSERT_HEAD(&list->head, entry, next);
	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
	__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
	DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
		list->name, (void *)entry, entry->ref_cnt);
done:
	rte_rwlock_write_unlock(&list->lock);
	return entry;
}

int
mlx5_cache_unregister(struct mlx5_cache_list *list,
		      struct mlx5_cache_entry *entry)
{
	rte_rwlock_write_lock(&list->lock);
	MLX5_ASSERT(entry && entry->next.le_prev);
	DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
		list->name, (void *)entry, entry->ref_cnt);
	if (--entry->ref_cnt) {
		rte_rwlock_write_unlock(&list->lock);
		return 1;
	}
	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
	__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
	LIST_REMOVE(entry, next);
	list->cb_remove(list, entry);
	rte_rwlock_write_unlock(&list->lock);
	DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
		list->name, (void *)entry);
	return 0;
}

void
mlx5_cache_list_destroy(struct mlx5_cache_list *list)
{
	struct mlx5_cache_entry *entry;

	MLX5_ASSERT(list);
	/* no LIST_FOREACH_SAFE, using while instead */
	while (!LIST_EMPTY(&list->head)) {
		entry = LIST_FIRST(&list->head);
		LIST_REMOVE(entry, next);
		list->cb_remove(list, entry);
		DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
			list->name, (void *)entry);
	}
	memset(list, 0, sizeof(*list));
}

uint32_t
mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
{
	MLX5_ASSERT(list);
	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
}

/********************* Indexed pool **********************/

static inline void
mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_lock(&pool->lock);
}

static inline void
mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_unlock(&pool->lock);
}

static inline uint32_t
mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t trunk_idx = 0;
	uint32_t i;

	if (!cfg->grow_trunk)
		return entry_idx / cfg->trunk_size;
	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
			    (cfg->trunk_size << (cfg->grow_shift *
			    cfg->grow_trunk)) + cfg->grow_trunk;
	} else {
		for (i = 0; i < cfg->grow_trunk; i++) {
			if (entry_idx < pool->grow_tbl[i])
				break;
		}
		trunk_idx = i;
	}
	return trunk_idx;
}

static inline uint32_t
mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;

	return cfg->trunk_size << (cfg->grow_shift *
	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
}

static inline uint32_t
mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t offset = 0;

	if (!trunk_idx)
		return 0;
	if (!cfg->grow_trunk)
		return cfg->trunk_size * trunk_idx;
	if (trunk_idx < cfg->grow_trunk)
		offset = pool->grow_tbl[trunk_idx - 1];
	else
		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
			 (cfg->trunk_size << (cfg->grow_shift *
			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
	return offset;
}
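
/*
 * Worked example of the trunk sizing helpers above, using the same values
 * the L3 table code below passes to mlx5_ipool_create() (trunk_size = 16,
 * grow_shift = 1, grow_trunk = 6):
 *
 *   trunk index:  0   1   2    3    4    5    6 ...
 *   trunk size : 16  32  64  128  256  512 1024 ... (1024 from then on)
 *   grow_tbl[] : 16  48 112  240  496 1008           (cumulative)
 *
 * Entry index 100 is below grow_tbl[5] = 1008 and the loop stops at i = 2
 * (100 < 112), so it lives in trunk 2 at offset 100 - 48 = 52.
 * Entry index 5000 is above 1008, so trunk = (5000 - 1008) / 1024 + 6 = 9,
 * and the offset of trunk 9 is 1008 + 1024 * (9 - 6) = 4080.
 */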

struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
{
	struct mlx5_indexed_pool *pool;
	uint32_t i;

	if (!cfg || (!cfg->malloc ^ !cfg->free) ||
	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
		return NULL;
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
			   SOCKET_ID_ANY);
	if (!pool)
		return NULL;
	pool->cfg = *cfg;
	if (!pool->cfg.trunk_size)
		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
	if (!cfg->malloc && !cfg->free) {
		pool->cfg.malloc = mlx5_malloc;
		pool->cfg.free = mlx5_free;
	}
	pool->free_list = TRUNK_INVALID;
	if (pool->cfg.need_lock)
		rte_spinlock_init(&pool->lock);
	/*
	 * Initialize the dynamic grow trunk size lookup table to have a quick
	 * lookup for the trunk entry index offset.
	 */
	for (i = 0; i < cfg->grow_trunk; i++) {
		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
		if (i > 0)
			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
	}
	return pool;
}
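
/*
 * A minimal usage sketch of the indexed pool (illustrative only; struct
 * my_obj and the configuration values are hypothetical). Index 0 is never
 * returned and serves as the "invalid" value:
 *
 *	struct mlx5_indexed_pool_config cfg = {
 *		.size = sizeof(struct my_obj),
 *		.trunk_size = 16,
 *		.grow_trunk = 3,
 *		.grow_shift = 1,
 *		.need_lock = 1,
 *		.release_mem_en = 1,
 *		.type = "my_obj_pool", // malloc/free default to mlx5_malloc/free
 *	};
 *	struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);
 *	uint32_t idx;
 *	struct my_obj *obj = mlx5_ipool_zmalloc(pool, &idx);
 *
 *	// ... use obj, or fetch it again later via mlx5_ipool_get(pool, idx)
 *
 *	mlx5_ipool_free(pool, idx);
 *	mlx5_ipool_destroy(pool);
 */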

static int
mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_trunk **trunk_tmp;
	struct mlx5_indexed_trunk **p;
	size_t trunk_size = 0;
	size_t data_size;
	size_t bmp_size;
	uint32_t idx;

	if (pool->n_trunk_valid == TRUNK_MAX_IDX)
		return -ENOMEM;
	if (pool->n_trunk_valid == pool->n_trunk) {
		/* No free trunk slots left, expand the trunk list. */
		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);

		p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
				     sizeof(struct mlx5_indexed_trunk *),
				     RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return -ENOMEM;
		if (pool->trunks)
			memcpy(p, pool->trunks, pool->n_trunk_valid *
			       sizeof(struct mlx5_indexed_trunk *));
		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
		trunk_tmp = pool->trunks;
		pool->trunks = p;
		if (trunk_tmp)
			pool->cfg.free(trunk_tmp);
		pool->n_trunk += n_grow;
	}
	if (!pool->cfg.release_mem_en) {
		idx = pool->n_trunk_valid;
	} else {
		/* Find the first available slot in trunk list */
		for (idx = 0; idx < pool->n_trunk; idx++)
			if (pool->trunks[idx] == NULL)
				break;
	}
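	/*
	 * Trunk memory layout: the struct mlx5_indexed_trunk header, then the
	 * entry data array rounded up to a cache line, then the rte_bitmap
	 * that tracks which entries are free.
	 */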
	trunk_size += sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, idx);
	bmp_size = rte_bitmap_get_memory_footprint(data_size);
	/* rte_bitmap requires cacheline aligned memory. */
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk_size += bmp_size;
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!trunk)
		return -ENOMEM;
	pool->trunks[idx] = trunk;
	trunk->idx = idx;
	trunk->free = data_size;
	trunk->prev = TRUNK_INVALID;
	trunk->next = TRUNK_INVALID;
	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
	pool->free_list = idx;
	/* Mark all entries as available. */
	trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
		     [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
		     bmp_size);
	MLX5_ASSERT(trunk->bmp);
	pool->n_trunk_valid++;
#ifdef POOL_DEBUG
	pool->trunk_new++;
	pool->trunk_avail++;
#endif
	return 0;
}

void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint64_t slab = 0;
	uint32_t iidx = 0;
	void *p;

	mlx5_ipool_lock(pool);
	if (pool->free_list == TRUNK_INVALID) {
		/* If no available trunks, grow new. */
		if (mlx5_ipool_grow(pool)) {
			mlx5_ipool_unlock(pool);
			return NULL;
		}
	}
	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
	trunk = pool->trunks[pool->free_list];
	MLX5_ASSERT(trunk->free);
	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
		mlx5_ipool_unlock(pool);
		return NULL;
	}
	MLX5_ASSERT(slab);
	iidx += __builtin_ctzll(slab);
	MLX5_ASSERT(iidx != UINT32_MAX);
	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
	rte_bitmap_clear(trunk->bmp, iidx);
	p = &trunk->data[iidx * pool->cfg.size];
	/*
	 * The ipool index should grow continually from small to big;
	 * some features, such as metering, only accept a limited number
	 * of index bits. A random index with the MSB set may be rejected.
	 */
	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
	iidx += 1; /* non-zero index. */
	trunk->free--;
#ifdef POOL_DEBUG
	pool->n_entry++;
#endif
	if (!trunk->free) {
		/* Full trunk will be removed from free list in imalloc. */
		MLX5_ASSERT(pool->free_list == trunk->idx);
		pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
		trunk->prev = TRUNK_INVALID;
		trunk->next = TRUNK_INVALID;
#ifdef POOL_DEBUG
		pool->trunk_empty++;
		pool->trunk_avail--;
#endif
	}
	*idx = iidx;
	mlx5_ipool_unlock(pool);
	return p;
}

void *
mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry = mlx5_ipool_malloc(pool, idx);

	if (entry && pool->cfg.size)
		memset(entry, 0, pool->cfg.size);
	return entry;
}

void
mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return;
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	rte_bitmap_set(trunk->bmp, entry_idx);
	trunk->free++;
	if (pool->cfg.release_mem_en && trunk->free == mlx5_trunk_size_get
	   (pool, trunk->idx)) {
		if (pool->free_list == trunk->idx)
			pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = trunk->prev;
		if (trunk->prev != TRUNK_INVALID)
			pool->trunks[trunk->prev]->next = trunk->next;
		pool->cfg.free(trunk);
		pool->trunks[trunk_idx] = NULL;
		pool->n_trunk_valid--;
#ifdef POOL_DEBUG
		pool->trunk_avail--;
		pool->trunk_free++;
#endif
		if (pool->n_trunk_valid == 0) {
			pool->cfg.free(pool->trunks);
			pool->trunks = NULL;
			pool->n_trunk = 0;
		}
	} else if (trunk->free == 1) {
		/* Put into free trunk list head. */
		MLX5_ASSERT(pool->free_list != trunk->idx);
		trunk->next = pool->free_list;
		trunk->prev = TRUNK_INVALID;
		if (pool->free_list != TRUNK_INVALID)
			pool->trunks[pool->free_list]->prev = trunk->idx;
		pool->free_list = trunk->idx;
#ifdef POOL_DEBUG
		pool->trunk_empty--;
		pool->trunk_avail++;
#endif
	}
#ifdef POOL_DEBUG
	pool->n_entry--;
#endif
out:
	mlx5_ipool_unlock(pool);
}

void *
mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	void *p = NULL;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return NULL;
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	p = &trunk->data[entry_idx * pool->cfg.size];
out:
	mlx5_ipool_unlock(pool);
	return p;
}

int
mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk **trunks;
	uint32_t i;

	MLX5_ASSERT(pool);
	mlx5_ipool_lock(pool);
	trunks = pool->trunks;
	for (i = 0; i < pool->n_trunk; i++) {
		if (trunks[i])
			pool->cfg.free(trunks[i]);
	}
	if (pool->trunks)
		pool->cfg.free(pool->trunks);
	mlx5_ipool_unlock(pool);
	mlx5_free(pool);
	return 0;
}

void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{
	printf("Pool %s entry size %u, trunks %u, %d entry per trunk, "
	       "total: %d\n",
	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
	       pool->cfg.trunk_size, pool->n_trunk_valid);
#ifdef POOL_DEBUG
	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
	       "available %u free %u\n",
	       pool->cfg.type, pool->n_entry, pool->trunk_new,
	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
#endif
}

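/*
 * A minimal usage sketch of the three-level table (illustrative only; the
 * level geometry macros and entry structs are defined in mlx5_utils.h):
 *
 *	struct mlx5_l3t_tbl *tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
 *	union mlx5_l3t_data data;
 *
 *	data.dword = 0xcafe;
 *	mlx5_l3t_set_entry(tbl, 42, &data);	// store, ref_cnt = 1
 *	mlx5_l3t_get_entry(tbl, 42, &data);	// read back, ref_cnt++
 *	mlx5_l3t_clear_entry(tbl, 42);		// ref_cnt--
 *	mlx5_l3t_clear_entry(tbl, 42);		// last ref, entry is cleared
 *	mlx5_l3t_destroy(tbl);
 */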
struct mlx5_l3t_tbl *
mlx5_l3t_create(enum mlx5_l3t_type type)
{
	struct mlx5_l3t_tbl *tbl;
	struct mlx5_indexed_pool_config l3t_ip_cfg = {
		.trunk_size = 16,
		.grow_trunk = 6,
		.grow_shift = 1,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
	};

	if (type >= MLX5_L3T_TYPE_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}
	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
			  SOCKET_ID_ANY);
	if (!tbl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tbl->type = type;
	switch (type) {
	case MLX5_L3T_TYPE_WORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_word);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_w";
		break;
	case MLX5_L3T_TYPE_DWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw";
		break;
	case MLX5_L3T_TYPE_QWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw";
		break;
	default:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr";
		break;
	}
	rte_spinlock_init(&tbl->sl);
	tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
	if (!tbl->eip) {
		rte_errno = ENOMEM;
		mlx5_free(tbl);
		tbl = NULL;
	}
	return tbl;
}

void
mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	uint32_t i, j;

	if (!tbl)
		return;
	g_tbl = tbl->tbl;
	if (g_tbl) {
		for (i = 0; i < MLX5_L3T_GT_SIZE; i++) {
			m_tbl = g_tbl->tbl[i];
			if (!m_tbl)
				continue;
			for (j = 0; j < MLX5_L3T_MT_SIZE; j++) {
				if (!m_tbl->tbl[j])
					continue;
				MLX5_ASSERT(!((struct mlx5_l3t_entry_word *)
					    m_tbl->tbl[j])->ref_cnt);
				mlx5_ipool_free(tbl->eip,
						((struct mlx5_l3t_entry_word *)
						m_tbl->tbl[j])->idx);
				m_tbl->tbl[j] = 0;
				if (!(--m_tbl->ref_cnt))
					break;
			}
			MLX5_ASSERT(!m_tbl->ref_cnt);
			mlx5_free(g_tbl->tbl[i]);
			g_tbl->tbl[i] = 0;
			if (!(--g_tbl->ref_cnt))
				break;
		}
		MLX5_ASSERT(!g_tbl->ref_cnt);
		mlx5_free(tbl->tbl);
		tbl->tbl = 0;
	}
	mlx5_ipool_destroy(tbl->eip);
	mlx5_free(tbl);
}

static int32_t
__l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;

	g_tbl = tbl->tbl;
	if (!g_tbl)
		return -1;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		return -1;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		return -1;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		data->word = w_e_tbl->entry[entry_idx].data;
		if (w_e_tbl->entry[entry_idx].data)
			w_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		data->dword = dw_e_tbl->entry[entry_idx].data;
		if (dw_e_tbl->entry[entry_idx].data)
			dw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		data->qword = qw_e_tbl->entry[entry_idx].data;
		if (qw_e_tbl->entry[entry_idx].data)
			qw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		data->ptr = ptr_e_tbl->entry[entry_idx].data;
		if (ptr_e_tbl->entry[entry_idx].data)
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	}
	return 0;
}

int32_t
mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_get_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

int32_t
mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;
	uint64_t ref_cnt;
	int32_t ret = -1;

	rte_spinlock_lock(&tbl->sl);
	g_tbl = tbl->tbl;
	if (!g_tbl)
		goto out;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		goto out;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		goto out;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
		ret = --w_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		w_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --w_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		dw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --dw_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		qw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --qw_e_tbl->ref_cnt;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
		ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		ptr_e_tbl->entry[entry_idx].data = NULL;
		ref_cnt = --ptr_e_tbl->ref_cnt;
		break;
	}
	if (!ref_cnt) {
		mlx5_ipool_free(tbl->eip,
				((struct mlx5_l3t_entry_word *)e_tbl)->idx);
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									NULL;
		if (!(--m_tbl->ref_cnt)) {
			mlx5_free(m_tbl);
			g_tbl->tbl
			[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] = NULL;
			if (!(--g_tbl->ref_cnt)) {
				mlx5_free(g_tbl);
				tbl->tbl = 0;
			}
		}
	}
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

static int32_t
__l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx, tbl_idx = 0;

	/* Check the global table, create it if empty. */
	g_tbl = tbl->tbl;
	if (!g_tbl) {
		g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!g_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		tbl->tbl = g_tbl;
	}
	/*
	 * Check the middle table, create it if empty. Ref_cnt will be
	 * increased if new sub table created.
	 */
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl) {
		m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!m_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
									m_tbl;
		g_tbl->ref_cnt++;
	}
	/*
	 * Check the entry table, create it if empty. Ref_cnt will be
	 * increased if new sub entry table created.
	 */
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl) {
		e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
		if (!e_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									e_tbl;
		m_tbl->ref_cnt++;
	}
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		if (w_e_tbl->entry[entry_idx].data) {
			data->word = w_e_tbl->entry[entry_idx].data;
			w_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		w_e_tbl->entry[entry_idx].data = data->word;
		w_e_tbl->entry[entry_idx].ref_cnt = 1;
		w_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		if (dw_e_tbl->entry[entry_idx].data) {
			data->dword = dw_e_tbl->entry[entry_idx].data;
			dw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		dw_e_tbl->entry[entry_idx].data = data->dword;
		dw_e_tbl->entry[entry_idx].ref_cnt = 1;
		dw_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		if (qw_e_tbl->entry[entry_idx].data) {
			data->qword = qw_e_tbl->entry[entry_idx].data;
			qw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		qw_e_tbl->entry[entry_idx].data = data->qword;
		qw_e_tbl->entry[entry_idx].ref_cnt = 1;
		qw_e_tbl->ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		if (ptr_e_tbl->entry[entry_idx].data) {
			data->ptr = ptr_e_tbl->entry[entry_idx].data;
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		ptr_e_tbl->entry[entry_idx].data = data->ptr;
		ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
		ptr_e_tbl->ref_cnt++;
		break;
	}
	return 0;
}

int32_t
mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_set_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

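/*
 * mlx5_l3t_prepare_entry() combines lookup and creation: if the entry at the
 * given index already holds data, it is returned with its reference count
 * increased; otherwise the user callback produces the data, which is then
 * stored. A sketch of such a callback, assuming the mlx5_l3t_alloc_callback_fn
 * typedef in mlx5_utils.h matches the call below (the names are illustrative):
 *
 *	static int32_t
 *	my_alloc_cb(void *ctx __rte_unused, union mlx5_l3t_data *data)
 *	{
 *		data->dword = 0xcafe;	// produce the value to be cached
 *		return 0;		// non-zero aborts the preparation
 *	}
 */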
int32_t
mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		       union mlx5_l3t_data *data,
		       mlx5_l3t_alloc_callback_fn cb, void *ctx)
{
	int32_t ret;

	rte_spinlock_lock(&tbl->sl);
	/* Check if entry data is ready. */
	ret = __l3t_get_entry(tbl, idx, data);
	if (!ret) {
		switch (tbl->type) {
		case MLX5_L3T_TYPE_WORD:
			if (data->word)
				goto out;
			break;
		case MLX5_L3T_TYPE_DWORD:
			if (data->dword)
				goto out;
			break;
		case MLX5_L3T_TYPE_QWORD:
			if (data->qword)
				goto out;
			break;
		default:
			if (data->ptr)
				goto out;
			break;
		}
	}
	/* Entry data is not ready, use user callback to create it. */
	ret = cb(ctx, data);
	if (ret)
		goto out;
	/* Save the new allocated data to entry. */
	ret = __l3t_set_entry(tbl, idx, data);
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}