xref: /dpdk/drivers/net/mlx5/mlx5_utils.c (revision d54e82e1b22d516e1bb23de60db64ac83c86a9f0)
146287eacSBing Zhao /* SPDX-License-Identifier: BSD-3-Clause
246287eacSBing Zhao  * Copyright 2019 Mellanox Technologies, Ltd
346287eacSBing Zhao  */
446287eacSBing Zhao 
546287eacSBing Zhao #include <rte_malloc.h>
646287eacSBing Zhao 
783c2047cSSuanming Mou #include <mlx5_malloc.h>
883c2047cSSuanming Mou 
946287eacSBing Zhao #include "mlx5_utils.h"
1046287eacSBing Zhao 
11e69a5922SXueming Li /********************* Indexed pool **********************/
12e69a5922SXueming Li 
13a3cf59f5SSuanming Mou static inline void
14a3cf59f5SSuanming Mou mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
15a3cf59f5SSuanming Mou {
16a3cf59f5SSuanming Mou 	if (pool->cfg.need_lock)
17d15c0946SSuanming Mou 		rte_spinlock_lock(&pool->rsz_lock);
18a3cf59f5SSuanming Mou }
19a3cf59f5SSuanming Mou 
20a3cf59f5SSuanming Mou static inline void
21a3cf59f5SSuanming Mou mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
22a3cf59f5SSuanming Mou {
23a3cf59f5SSuanming Mou 	if (pool->cfg.need_lock)
24d15c0946SSuanming Mou 		rte_spinlock_unlock(&pool->rsz_lock);
25a3cf59f5SSuanming Mou }
26a3cf59f5SSuanming Mou 
/*
 * Map a zero-based entry index to the index of the trunk that holds it.
 *
 * Without dynamic growth (cfg->grow_trunk == 0) every trunk has the same
 * size, so a plain division is enough.  With growth enabled, the first
 * cfg->grow_trunk trunks have increasing sizes whose cumulative entry
 * counts are cached in pool->grow_tbl[]; all later trunks share the
 * final (largest) size.
 */
static inline uint32_t
mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t trunk_idx = 0;
	uint32_t i;

	if (!cfg->grow_trunk)
		return entry_idx / cfg->trunk_size;
	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
		/* Past the growing region: fixed-size tail trunks. */
		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
			    (cfg->trunk_size << (cfg->grow_shift *
			    cfg->grow_trunk)) + cfg->grow_trunk;
	} else {
		/* Linear scan of the (small) cumulative-size table. */
		for (i = 0; i < cfg->grow_trunk; i++) {
			if (entry_idx < pool->grow_tbl[i])
				break;
		}
		trunk_idx = i;
	}
	return trunk_idx;
}
4962d7d519SSuanming Mou 
/*
 * Return the number of entries in the trunk at @trunk_idx.
 *
 * Trunk sizes double-grow by cfg->grow_shift per trunk until
 * cfg->grow_trunk trunks exist; beyond that the size is capped at the
 * largest (trunk_size << (grow_shift * grow_trunk)) value.
 */
static inline uint32_t
mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;

	return cfg->trunk_size << (cfg->grow_shift *
	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
}
5862d7d519SSuanming Mou 
/*
 * Return the zero-based entry index of the first entry in trunk
 * @trunk_idx, i.e. the total number of entries in all preceding trunks.
 *
 * For growing pools the cumulative counts of the first cfg->grow_trunk
 * trunks come from pool->grow_tbl[]; trunks after the growing region all
 * have the maximal size, so their contribution is a simple product.
 */
static inline uint32_t
mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t offset = 0;

	if (!trunk_idx)
		return 0;
	if (!cfg->grow_trunk)
		return cfg->trunk_size * trunk_idx;
	if (trunk_idx < cfg->grow_trunk)
		offset = pool->grow_tbl[trunk_idx - 1];
	else
		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
			 (cfg->trunk_size << (cfg->grow_shift *
			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
	return offset;
}
7762d7d519SSuanming Mou 
/*
 * Create an indexed pool from @cfg.
 *
 * Rejected configurations:
 *  - NULL cfg;
 *  - exactly one of cfg->malloc / cfg->free set (XOR) — callbacks must be
 *    provided as a pair or not at all;
 *  - per-core cache combined with release_mem_en (mutually exclusive modes);
 *  - a trunk_size that is not a power of two, or so large that trunk-local
 *    entry bits plus TRUNK_IDX_BITS would not fit in a 32-bit index.
 *
 * Returns the new pool, or NULL on invalid config / allocation failure.
 * The grow_tbl[] cumulative-size lookup table is allocated inline after
 * the pool struct (flexible tail) and filled here.
 */
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
{
	struct mlx5_indexed_pool *pool;
	uint32_t i;

	if (!cfg || (!cfg->malloc ^ !cfg->free) ||
	    (cfg->per_core_cache && cfg->release_mem_en) ||
	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
		return NULL;
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
			   SOCKET_ID_ANY);
	if (!pool)
		return NULL;
	pool->cfg = *cfg;
	if (!pool->cfg.trunk_size)
		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
	/* Neither callback given: fall back to the mlx5 allocator pair. */
	if (!cfg->malloc && !cfg->free) {
		pool->cfg.malloc = mlx5_malloc;
		pool->cfg.free = mlx5_free;
	}
	if (pool->cfg.need_lock)
		rte_spinlock_init(&pool->rsz_lock);
	/*
	 * Initialize the dynamic grow trunk size lookup table to have a quick
	 * lookup for the trunk entry index offset.
	 */
	for (i = 0; i < cfg->grow_trunk; i++) {
		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
		if (i > 0)
			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
	}
	/* Default capacity: everything addressable with TRUNK_MAX_IDX trunks. */
	if (!pool->cfg.max_idx)
		pool->cfg.max_idx =
			mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1);
	if (!cfg->per_core_cache)
		pool->free_list = TRUNK_INVALID;
	rte_spinlock_init(&pool->lcore_lock);
	return pool;
}
120a3cf59f5SSuanming Mou 
/*
 * Allocate one more trunk for a non-per-core-cache pool and put it at the
 * head of the free list.  Caller must hold the pool lock (called from
 * mlx5_ipool_malloc() under mlx5_ipool_lock()).
 *
 * Trunk memory layout (single allocation):
 *   [struct mlx5_indexed_trunk][entry data, cacheline-rounded][rte_bitmap]
 *
 * Returns 0 on success, -ENOMEM when the pool is full or allocation fails.
 */
static int
mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_trunk **trunk_tmp;
	struct mlx5_indexed_trunk **p;
	size_t trunk_size = 0;
	size_t data_size;
	size_t bmp_size;
	uint32_t idx, cur_max_idx, i;

	cur_max_idx = mlx5_trunk_idx_offset_get(pool, pool->n_trunk_valid);
	if (pool->n_trunk_valid == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return -ENOMEM;
	if (pool->n_trunk_valid == pool->n_trunk) {
		/* No free trunk flags, expand trunk list. */
		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);

		p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
				     sizeof(struct mlx5_indexed_trunk *),
				     RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return -ENOMEM;
		if (pool->trunks)
			memcpy(p, pool->trunks, pool->n_trunk_valid *
			       sizeof(struct mlx5_indexed_trunk *));
		/* Zero only the newly added slots. */
		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
		trunk_tmp = pool->trunks;
		pool->trunks = p;
		if (trunk_tmp)
			pool->cfg.free(trunk_tmp);
		pool->n_trunk += n_grow;
	}
	if (!pool->cfg.release_mem_en) {
		/* Trunks are never freed: next slot is always the tail. */
		idx = pool->n_trunk_valid;
	} else {
		/* Find the first available slot in trunk list */
		for (idx = 0; idx < pool->n_trunk; idx++)
			if (pool->trunks[idx] == NULL)
				break;
	}
	trunk_size += sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, idx);
	bmp_size = rte_bitmap_get_memory_footprint(data_size);
	/* rte_bitmap requires memory cacheline aligned. */
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk_size += bmp_size;
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!trunk)
		return -ENOMEM;
	pool->trunks[idx] = trunk;
	trunk->idx = idx;
	trunk->free = data_size;
	trunk->prev = TRUNK_INVALID;
	trunk->next = TRUNK_INVALID;
	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
	pool->free_list = idx;
	/* Mark all entries as available. */
	trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
		     [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
		     bmp_size);
	/* Clear the overhead bits in the trunk if it happens. */
	if (cur_max_idx + data_size > pool->cfg.max_idx) {
		/* Entries beyond max_idx must never be handed out. */
		for (i = pool->cfg.max_idx - cur_max_idx; i < data_size; i++)
			rte_bitmap_clear(trunk->bmp, i);
	}
	MLX5_ASSERT(trunk->bmp);
	pool->n_trunk_valid++;
#ifdef POOL_DEBUG
	pool->trunk_new++;
	pool->trunk_avail++;
#endif
	return 0;
}
199a3cf59f5SSuanming Mou 
/*
 * Synchronize lcore @cidx's local cache pointer with the current global
 * cache (pool->gc) and return the up-to-date cache.
 *
 * The global pointer is read relaxed first as a cheap staleness check;
 * the actual swap of ref counts happens under the pool lock.  The old
 * local cache is freed outside the lock once its refcount drops to zero.
 */
static inline struct mlx5_indexed_cache *
mlx5_ipool_update_global_cache(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_cache *gc, *lc, *olc = NULL;

	lc = pool->cache[cidx]->lc;
	gc = rte_atomic_load_explicit(&pool->gc, rte_memory_order_relaxed);
	if (gc && lc != gc) {
		mlx5_ipool_lock(pool);
		/* Drop our reference on the stale local cache. */
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = pool->gc;
		lc->ref_cnt++;
		pool->cache[cidx]->lc = lc;
		mlx5_ipool_unlock(pool);
		if (olc)
			pool->cfg.free(olc);
	}
	return lc;
}
220d15c0946SSuanming Mou 
/*
 * Refill lcore @cidx's per-core index cache from the global cache,
 * growing the pool with a new trunk when the global cache is empty.
 *
 * Returns a freshly allocated 1-based entry index, or 0 on failure
 * (pool exhausted / allocation error).  On success the local cache is
 * left holding additional indices for subsequent fast-path allocations.
 *
 * The new trunk and (optionally) resized trunk array are prepared
 * outside the lock, then published under the lock after a double-check;
 * if another lcore raced ahead, everything is freed and we retry.
 */
static uint32_t
mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *p, *lc, *olc = NULL;
	size_t trunk_size = 0;
	size_t data_size;
	uint32_t cur_max_idx, trunk_idx, trunk_n;
	uint32_t fetch_size, ts_idx, i;
	int n_grow;

check_again:
	p = NULL;
	fetch_size = 0;
	/*
	 * Fetch new index from global if possible. First round local
	 * cache will be NULL.
	 */
	lc = pool->cache[cidx]->lc;
	mlx5_ipool_lock(pool);
	/* Try to update local cache first. */
	if (likely(pool->gc)) {
		if (lc != pool->gc) {
			/* Local cache is stale: re-reference the global one. */
			if (lc && !(--lc->ref_cnt))
				olc = lc;
			lc = pool->gc;
			lc->ref_cnt++;
			pool->cache[cidx]->lc = lc;
		}
		if (lc->len) {
			/* Use the updated local cache to fetch index. */
			fetch_size = pool->cfg.per_core_cache >> 2;
			if (lc->len < fetch_size)
				fetch_size = lc->len;
			lc->len -= fetch_size;
			memcpy(pool->cache[cidx]->idx, &lc->idx[lc->len],
			       sizeof(uint32_t) * fetch_size);
		}
	}
	mlx5_ipool_unlock(pool);
	if (unlikely(olc)) {
		pool->cfg.free(olc);
		olc = NULL;
	}
	if (fetch_size) {
		/* Hand out the last fetched index, keep the rest cached. */
		pool->cache[cidx]->len = fetch_size - 1;
		return pool->cache[cidx]->idx[pool->cache[cidx]->len];
	}
	trunk_idx = lc ? rte_atomic_load_explicit(&lc->n_trunk_valid,
			 rte_memory_order_acquire) : 0;
	trunk_n = lc ? lc->n_trunk : 0;
	cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
	/* Check if index reach maximum. */
	if (trunk_idx == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return 0;
	/* No enough space in trunk array, resize the trunks array. */
	if (trunk_idx == trunk_n) {
		n_grow = trunk_idx ? trunk_idx :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);
		cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_n + n_grow);
		/* Resize the trunk array. */
		p = pool->cfg.malloc(0, ((trunk_idx + n_grow) *
			sizeof(struct mlx5_indexed_trunk *)) +
			(cur_max_idx * sizeof(uint32_t)) + sizeof(*p),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return 0;
		/* Trunk pointer array lives after the idx[] tail. */
		p->trunks = (struct mlx5_indexed_trunk **)&p->idx[cur_max_idx];
		if (lc)
			memcpy(p->trunks, lc->trunks, trunk_idx *
			       sizeof(struct mlx5_indexed_trunk *));
#ifdef RTE_LIBRTE_MLX5_DEBUG
		memset(RTE_PTR_ADD(p->trunks, trunk_idx * sizeof(void *)), 0,
			n_grow * sizeof(void *));
#endif
		p->n_trunk_valid = trunk_idx;
		p->n_trunk = trunk_n + n_grow;
		p->len = 0;
	}
	/* Prepare the new trunk. */
	trunk_size = sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, trunk_idx);
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (unlikely(!trunk)) {
		pool->cfg.free(p);
		return 0;
	}
	trunk->idx = trunk_idx;
	trunk->free = data_size;
	mlx5_ipool_lock(pool);
	/*
	 * Double check if trunks has been updated or have available index.
	 * During the new trunk allocate, index may still be flushed to the
	 * global cache. So also need to check the pool->gc->len.
	 */
	if (pool->gc && (lc != pool->gc ||
	    lc->n_trunk_valid != trunk_idx ||
	    pool->gc->len)) {
		/* Lost the race: discard our work and retry. */
		mlx5_ipool_unlock(pool);
		if (p)
			pool->cfg.free(p);
		pool->cfg.free(trunk);
		goto check_again;
	}
	/* Resize the trunk array and update local cache first.  */
	if (p) {
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = p;
		lc->ref_cnt = 1;
		pool->cache[cidx]->lc = lc;
		rte_atomic_store_explicit(&pool->gc, p, rte_memory_order_relaxed);
	}
	/* Add trunk to trunks array. */
	lc->trunks[trunk_idx] = trunk;
	rte_atomic_fetch_add_explicit(&lc->n_trunk_valid, 1, rte_memory_order_relaxed);
	/* Enqueue half of the index to global. */
	ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
	fetch_size = trunk->free >> 1;
	/* Cap so the local half never overflows the per-core cache. */
	if (fetch_size > pool->cfg.per_core_cache)
		fetch_size = trunk->free - pool->cfg.per_core_cache;
	for (i = 0; i < fetch_size; i++)
		lc->idx[i] = ts_idx + i;
	lc->len = fetch_size;
	mlx5_ipool_unlock(pool);
	/* Copy left half - 1 to local cache index array. */
	pool->cache[cidx]->len = trunk->free - fetch_size - 1;
	ts_idx += fetch_size;
	for (i = 0; i < pool->cache[cidx]->len; i++)
		pool->cache[cidx]->idx[i] = ts_idx + i;
	if (olc)
		pool->cfg.free(olc);
	/* Return the highest index of the trunk as the allocated one. */
	return ts_idx + i;
}
358d15c0946SSuanming Mou 
/*
 * Translate a 1-based entry index into its data pointer for a
 * per-core-cache pool, using lcore slot @cidx.  Allocates the per-lcore
 * cache structure on first use (an entry may be looked up on a core
 * other than the one that allocated it).
 *
 * Returns NULL when the per-lcore cache cannot be allocated or the
 * owning trunk does not exist.
 *
 * NOTE(review): mlx5_ipool_update_global_cache() can return NULL before
 * any trunk was published (pool->gc == NULL); callers appear to only
 * look up indices that were previously allocated — confirm.
 */
static void *
_mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *lc;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	MLX5_ASSERT(idx);
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
			return NULL;
		}
	}
	lc = mlx5_ipool_update_global_cache(pool, cidx);
	idx -= 1;	/* Public indices are 1-based; internal are 0-based. */
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	trunk = lc->trunks[trunk_idx];
	if (!trunk)
		return NULL;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx);
	return &trunk->data[entry_idx * pool->cfg.size];
}
387d15c0946SSuanming Mou 
388d15c0946SSuanming Mou static void *
38942f46339SSuanming Mou mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
390d15c0946SSuanming Mou {
39142f46339SSuanming Mou 	void *entry;
392d15c0946SSuanming Mou 	int cidx;
393d15c0946SSuanming Mou 
394d15c0946SSuanming Mou 	cidx = rte_lcore_index(rte_lcore_id());
395d15c0946SSuanming Mou 	if (unlikely(cidx == -1)) {
39642f46339SSuanming Mou 		cidx = RTE_MAX_LCORE;
39742f46339SSuanming Mou 		rte_spinlock_lock(&pool->lcore_lock);
398d15c0946SSuanming Mou 	}
39942f46339SSuanming Mou 	entry = _mlx5_ipool_get_cache(pool, cidx, idx);
40042f46339SSuanming Mou 	if (unlikely(cidx == RTE_MAX_LCORE))
40142f46339SSuanming Mou 		rte_spinlock_unlock(&pool->lcore_lock);
40242f46339SSuanming Mou 	return entry;
40342f46339SSuanming Mou }
40442f46339SSuanming Mou 
40542f46339SSuanming Mou 
/*
 * Allocate an entry for a per-core-cache pool using lcore slot @cidx.
 *
 * Fast path pops an index from the per-lcore cache; otherwise it refills
 * from (or grows) the global cache.  On success stores the 1-based index
 * in *idx and returns the entry's data pointer; returns NULL on failure.
 */
static void *
_mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, int cidx,
			 uint32_t *idx)
{
	if (unlikely(!pool->cache[cidx])) {
		/* First allocation on this lcore: create its cache. */
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
			return NULL;
		}
	} else if (pool->cache[cidx]->len) {
		/* Fast path: pop a cached index. */
		pool->cache[cidx]->len--;
		*idx = pool->cache[cidx]->idx[pool->cache[cidx]->len];
		return _mlx5_ipool_get_cache(pool, cidx, *idx);
	}
	/* Not enough idx in global cache. Keep fetching from global. */
	*idx = mlx5_ipool_allocate_from_global(pool, cidx);
	if (unlikely(!(*idx)))
		return NULL;
	return _mlx5_ipool_get_cache(pool, cidx, *idx);
}
43042f46339SSuanming Mou 
43142f46339SSuanming Mou static void *
43242f46339SSuanming Mou mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
43342f46339SSuanming Mou {
43442f46339SSuanming Mou 	void *entry;
43542f46339SSuanming Mou 	int cidx;
43642f46339SSuanming Mou 
43742f46339SSuanming Mou 	cidx = rte_lcore_index(rte_lcore_id());
43842f46339SSuanming Mou 	if (unlikely(cidx == -1)) {
43942f46339SSuanming Mou 		cidx = RTE_MAX_LCORE;
44042f46339SSuanming Mou 		rte_spinlock_lock(&pool->lcore_lock);
44142f46339SSuanming Mou 	}
44242f46339SSuanming Mou 	entry = _mlx5_ipool_malloc_cache(pool, cidx, idx);
44342f46339SSuanming Mou 	if (unlikely(cidx == RTE_MAX_LCORE))
44442f46339SSuanming Mou 		rte_spinlock_unlock(&pool->lcore_lock);
44542f46339SSuanming Mou 	return entry;
446d15c0946SSuanming Mou }
447d15c0946SSuanming Mou 
/*
 * Return 1-based index @idx to a per-core-cache pool via lcore slot
 * @cidx.  Enqueues to the local cache when there is room; when the
 * local cache is full, a quarter of it is flushed back to the global
 * cache under the pool lock before enqueuing.
 */
static void
_mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
{
	struct mlx5_ipool_per_lcore *ilc;
	struct mlx5_indexed_cache *gc, *olc = NULL;
	uint32_t reclaim_num = 0;

	MLX5_ASSERT(idx);
	/*
	 * When index was allocated on core A but freed on core B. In this
	 * case check if local cache on core B was allocated before.
	 */
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
			return;
		}
	}
	/* Try to enqueue to local index cache. */
	if (pool->cache[cidx]->len < pool->cfg.per_core_cache) {
		pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
		pool->cache[cidx]->len++;
		return;
	}
	ilc = pool->cache[cidx];
	/* Flush a quarter of the local cache back to global. */
	reclaim_num = pool->cfg.per_core_cache >> 2;
	ilc->len -= reclaim_num;
	/* Local index cache full, try with global index cache. */
	mlx5_ipool_lock(pool);
	gc = pool->gc;
	if (ilc->lc != gc) {
		/* Re-reference the current global cache first. */
		if (ilc->lc && !(--ilc->lc->ref_cnt))
			olc = ilc->lc;
		gc->ref_cnt++;
		ilc->lc = gc;
	}
	memcpy(&gc->idx[gc->len], &ilc->idx[ilc->len],
	       reclaim_num * sizeof(uint32_t));
	gc->len += reclaim_num;
	mlx5_ipool_unlock(pool);
	if (olc)
		pool->cfg.free(olc);
	/* Room is guaranteed now: enqueue the freed index locally. */
	pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
	pool->cache[cidx]->len++;
}
497d15c0946SSuanming Mou 
49842f46339SSuanming Mou static void
49942f46339SSuanming Mou mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
50042f46339SSuanming Mou {
50142f46339SSuanming Mou 	int cidx;
50242f46339SSuanming Mou 
50342f46339SSuanming Mou 	cidx = rte_lcore_index(rte_lcore_id());
50442f46339SSuanming Mou 	if (unlikely(cidx == -1)) {
50542f46339SSuanming Mou 		cidx = RTE_MAX_LCORE;
50642f46339SSuanming Mou 		rte_spinlock_lock(&pool->lcore_lock);
50742f46339SSuanming Mou 	}
50842f46339SSuanming Mou 	_mlx5_ipool_free_cache(pool, cidx, idx);
50942f46339SSuanming Mou 	if (unlikely(cidx == RTE_MAX_LCORE))
51042f46339SSuanming Mou 		rte_spinlock_unlock(&pool->lcore_lock);
51142f46339SSuanming Mou }
51242f46339SSuanming Mou 
/*
 * Allocate one entry from the pool.
 *
 * Per-core-cache pools are delegated to the lock-light cache path.
 * Otherwise, under the pool lock: grow a trunk if the free list is
 * empty, scan the head trunk's bitmap for a free slot, clear its bit,
 * and compose the returned 1-based global index (0 is reserved as the
 * "invalid" index).  A trunk that becomes full is unlinked from the
 * free list.
 *
 * On success stores the index in *idx and returns the entry pointer;
 * returns NULL when the pool is exhausted or growth fails.
 */
void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint64_t slab = 0;
	uint32_t iidx = 0;
	void *p;

	if (pool->cfg.per_core_cache)
		return mlx5_ipool_malloc_cache(pool, idx);
	mlx5_ipool_lock(pool);
	if (pool->free_list == TRUNK_INVALID) {
		/* If no available trunks, grow new. */
		if (mlx5_ipool_grow(pool)) {
			mlx5_ipool_unlock(pool);
			return NULL;
		}
	}
	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
	trunk = pool->trunks[pool->free_list];
	MLX5_ASSERT(trunk->free);
	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
		mlx5_ipool_unlock(pool);
		return NULL;
	}
	MLX5_ASSERT(slab);
	/* The scan returns a 64-bit slab; find the exact set bit in it. */
	iidx += rte_ctz64(slab);
	MLX5_ASSERT(iidx != UINT32_MAX);
	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
	rte_bitmap_clear(trunk->bmp, iidx);
	p = &trunk->data[iidx * pool->cfg.size];
	/*
	 * The ipool index should grow continually from small to big,
	 * some features as metering only accept limited bits of index.
	 * Random index with MSB set may be rejected.
	 */
	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
	iidx += 1; /* non-zero index. */
	trunk->free--;
#ifdef POOL_DEBUG
	pool->n_entry++;
#endif
	if (!trunk->free) {
		/* Full trunk will be removed from free list in imalloc. */
		MLX5_ASSERT(pool->free_list == trunk->idx);
		pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
		trunk->prev = TRUNK_INVALID;
		trunk->next = TRUNK_INVALID;
#ifdef POOL_DEBUG
		pool->trunk_empty++;
		pool->trunk_avail--;
#endif
	}
	*idx = iidx;
	mlx5_ipool_unlock(pool);
	return p;
}
572a3cf59f5SSuanming Mou 
573a3cf59f5SSuanming Mou void *
574a3cf59f5SSuanming Mou mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
575a3cf59f5SSuanming Mou {
576a3cf59f5SSuanming Mou 	void *entry = mlx5_ipool_malloc(pool, idx);
577a3cf59f5SSuanming Mou 
57879807d6aSXueming Li 	if (entry && pool->cfg.size)
579a3cf59f5SSuanming Mou 		memset(entry, 0, pool->cfg.size);
580a3cf59f5SSuanming Mou 	return entry;
581a3cf59f5SSuanming Mou }
582a3cf59f5SSuanming Mou 
583a3cf59f5SSuanming Mou void
584a3cf59f5SSuanming Mou mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
585a3cf59f5SSuanming Mou {
586a3cf59f5SSuanming Mou 	struct mlx5_indexed_trunk *trunk;
587a3cf59f5SSuanming Mou 	uint32_t trunk_idx;
58862d7d519SSuanming Mou 	uint32_t entry_idx;
589a3cf59f5SSuanming Mou 
590a3cf59f5SSuanming Mou 	if (!idx)
591a3cf59f5SSuanming Mou 		return;
592d15c0946SSuanming Mou 	if (pool->cfg.per_core_cache) {
593d15c0946SSuanming Mou 		mlx5_ipool_free_cache(pool, idx);
594d15c0946SSuanming Mou 		return;
595d15c0946SSuanming Mou 	}
596a3cf59f5SSuanming Mou 	idx -= 1;
597a3cf59f5SSuanming Mou 	mlx5_ipool_lock(pool);
59862d7d519SSuanming Mou 	trunk_idx = mlx5_trunk_idx_get(pool, idx);
5991fd4bb67SSuanming Mou 	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
6001fd4bb67SSuanming Mou 	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
601a3cf59f5SSuanming Mou 		goto out;
602a3cf59f5SSuanming Mou 	trunk = pool->trunks[trunk_idx];
60362d7d519SSuanming Mou 	if (!trunk)
604a3cf59f5SSuanming Mou 		goto out;
60562d7d519SSuanming Mou 	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
60662d7d519SSuanming Mou 	if (trunk_idx != trunk->idx ||
60762d7d519SSuanming Mou 	    rte_bitmap_get(trunk->bmp, entry_idx))
60862d7d519SSuanming Mou 		goto out;
60962d7d519SSuanming Mou 	rte_bitmap_set(trunk->bmp, entry_idx);
610a3cf59f5SSuanming Mou 	trunk->free++;
6111fd4bb67SSuanming Mou 	if (pool->cfg.release_mem_en && trunk->free == mlx5_trunk_size_get
6121fd4bb67SSuanming Mou 	   (pool, trunk->idx)) {
6131fd4bb67SSuanming Mou 		if (pool->free_list == trunk->idx)
6141fd4bb67SSuanming Mou 			pool->free_list = trunk->next;
6151fd4bb67SSuanming Mou 		if (trunk->next != TRUNK_INVALID)
6161fd4bb67SSuanming Mou 			pool->trunks[trunk->next]->prev = trunk->prev;
6171fd4bb67SSuanming Mou 		if (trunk->prev != TRUNK_INVALID)
6181fd4bb67SSuanming Mou 			pool->trunks[trunk->prev]->next = trunk->next;
6191fd4bb67SSuanming Mou 		pool->cfg.free(trunk);
6201fd4bb67SSuanming Mou 		pool->trunks[trunk_idx] = NULL;
6211fd4bb67SSuanming Mou 		pool->n_trunk_valid--;
6221fd4bb67SSuanming Mou #ifdef POOL_DEBUG
6231fd4bb67SSuanming Mou 		pool->trunk_avail--;
6241fd4bb67SSuanming Mou 		pool->trunk_free++;
6251fd4bb67SSuanming Mou #endif
6261fd4bb67SSuanming Mou 		if (pool->n_trunk_valid == 0) {
6271fd4bb67SSuanming Mou 			pool->cfg.free(pool->trunks);
6281fd4bb67SSuanming Mou 			pool->trunks = NULL;
6291fd4bb67SSuanming Mou 			pool->n_trunk = 0;
6301fd4bb67SSuanming Mou 		}
6311fd4bb67SSuanming Mou 	} else if (trunk->free == 1) {
632a3cf59f5SSuanming Mou 		/* Put into free trunk list head. */
633a3cf59f5SSuanming Mou 		MLX5_ASSERT(pool->free_list != trunk->idx);
634a3cf59f5SSuanming Mou 		trunk->next = pool->free_list;
635a3cf59f5SSuanming Mou 		trunk->prev = TRUNK_INVALID;
636a3cf59f5SSuanming Mou 		if (pool->free_list != TRUNK_INVALID)
637a3cf59f5SSuanming Mou 			pool->trunks[pool->free_list]->prev = trunk->idx;
638a3cf59f5SSuanming Mou 		pool->free_list = trunk->idx;
639a3cf59f5SSuanming Mou #ifdef POOL_DEBUG
640a3cf59f5SSuanming Mou 		pool->trunk_empty--;
641a3cf59f5SSuanming Mou 		pool->trunk_avail++;
642a3cf59f5SSuanming Mou #endif
643a3cf59f5SSuanming Mou 	}
644a3cf59f5SSuanming Mou #ifdef POOL_DEBUG
645a3cf59f5SSuanming Mou 	pool->n_entry--;
646a3cf59f5SSuanming Mou #endif
647a3cf59f5SSuanming Mou out:
648a3cf59f5SSuanming Mou 	mlx5_ipool_unlock(pool);
649a3cf59f5SSuanming Mou }
650a3cf59f5SSuanming Mou 
651a3cf59f5SSuanming Mou void *
652a3cf59f5SSuanming Mou mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
653a3cf59f5SSuanming Mou {
654a3cf59f5SSuanming Mou 	struct mlx5_indexed_trunk *trunk;
655a3cf59f5SSuanming Mou 	void *p = NULL;
656a3cf59f5SSuanming Mou 	uint32_t trunk_idx;
65762d7d519SSuanming Mou 	uint32_t entry_idx;
658a3cf59f5SSuanming Mou 
659a3cf59f5SSuanming Mou 	if (!idx)
660a3cf59f5SSuanming Mou 		return NULL;
661d15c0946SSuanming Mou 	if (pool->cfg.per_core_cache)
662d15c0946SSuanming Mou 		return mlx5_ipool_get_cache(pool, idx);
663a3cf59f5SSuanming Mou 	idx -= 1;
664a3cf59f5SSuanming Mou 	mlx5_ipool_lock(pool);
66562d7d519SSuanming Mou 	trunk_idx = mlx5_trunk_idx_get(pool, idx);
6661fd4bb67SSuanming Mou 	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
6671fd4bb67SSuanming Mou 	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
668a3cf59f5SSuanming Mou 		goto out;
669a3cf59f5SSuanming Mou 	trunk = pool->trunks[trunk_idx];
67062d7d519SSuanming Mou 	if (!trunk)
671a3cf59f5SSuanming Mou 		goto out;
67262d7d519SSuanming Mou 	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
67362d7d519SSuanming Mou 	if (trunk_idx != trunk->idx ||
67462d7d519SSuanming Mou 	    rte_bitmap_get(trunk->bmp, entry_idx))
67562d7d519SSuanming Mou 		goto out;
67662d7d519SSuanming Mou 	p = &trunk->data[entry_idx * pool->cfg.size];
677a3cf59f5SSuanming Mou out:
678a3cf59f5SSuanming Mou 	mlx5_ipool_unlock(pool);
679a3cf59f5SSuanming Mou 	return p;
680a3cf59f5SSuanming Mou }
681a3cf59f5SSuanming Mou 
682a3cf59f5SSuanming Mou int
683a3cf59f5SSuanming Mou mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
684a3cf59f5SSuanming Mou {
685d15c0946SSuanming Mou 	struct mlx5_indexed_trunk **trunks = NULL;
686d15c0946SSuanming Mou 	struct mlx5_indexed_cache *gc = pool->gc;
687d15c0946SSuanming Mou 	uint32_t i, n_trunk_valid = 0;
688a3cf59f5SSuanming Mou 
689a3cf59f5SSuanming Mou 	MLX5_ASSERT(pool);
690a3cf59f5SSuanming Mou 	mlx5_ipool_lock(pool);
691d15c0946SSuanming Mou 	if (pool->cfg.per_core_cache) {
69242f46339SSuanming Mou 		for (i = 0; i <= RTE_MAX_LCORE; i++) {
693d15c0946SSuanming Mou 			/*
694d15c0946SSuanming Mou 			 * Free only old global cache. Pool gc will be
695d15c0946SSuanming Mou 			 * freed at last.
696d15c0946SSuanming Mou 			 */
697d15c0946SSuanming Mou 			if (pool->cache[i]) {
698d15c0946SSuanming Mou 				if (pool->cache[i]->lc &&
699d15c0946SSuanming Mou 				    pool->cache[i]->lc != pool->gc &&
700d15c0946SSuanming Mou 				    (!(--pool->cache[i]->lc->ref_cnt)))
701d15c0946SSuanming Mou 					pool->cfg.free(pool->cache[i]->lc);
702d15c0946SSuanming Mou 				pool->cfg.free(pool->cache[i]);
703d15c0946SSuanming Mou 			}
704d15c0946SSuanming Mou 		}
705d15c0946SSuanming Mou 		if (gc) {
706d15c0946SSuanming Mou 			trunks = gc->trunks;
707d15c0946SSuanming Mou 			n_trunk_valid = gc->n_trunk_valid;
708d15c0946SSuanming Mou 		}
709d15c0946SSuanming Mou 	} else {
710d15c0946SSuanming Mou 		gc = NULL;
711a3cf59f5SSuanming Mou 		trunks = pool->trunks;
712d15c0946SSuanming Mou 		n_trunk_valid = pool->n_trunk_valid;
713d15c0946SSuanming Mou 	}
714d15c0946SSuanming Mou 	for (i = 0; i < n_trunk_valid; i++) {
715a3cf59f5SSuanming Mou 		if (trunks[i])
716a3cf59f5SSuanming Mou 			pool->cfg.free(trunks[i]);
717a3cf59f5SSuanming Mou 	}
718d15c0946SSuanming Mou 	if (!gc && trunks)
719d15c0946SSuanming Mou 		pool->cfg.free(trunks);
720d15c0946SSuanming Mou 	if (gc)
721d15c0946SSuanming Mou 		pool->cfg.free(gc);
722a3cf59f5SSuanming Mou 	mlx5_ipool_unlock(pool);
72383c2047cSSuanming Mou 	mlx5_free(pool);
724a3cf59f5SSuanming Mou 	return 0;
725a3cf59f5SSuanming Mou }
726a3cf59f5SSuanming Mou 
727a3cf59f5SSuanming Mou void
72864a80f1cSSuanming Mou mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
72964a80f1cSSuanming Mou {
73064a80f1cSSuanming Mou 	uint32_t i, j;
73164a80f1cSSuanming Mou 	struct mlx5_indexed_cache *gc;
73264a80f1cSSuanming Mou 	struct rte_bitmap *ibmp;
73364a80f1cSSuanming Mou 	uint32_t bmp_num, mem_size;
73464a80f1cSSuanming Mou 
73564a80f1cSSuanming Mou 	if (!pool->cfg.per_core_cache)
73664a80f1cSSuanming Mou 		return;
73764a80f1cSSuanming Mou 	gc = pool->gc;
73864a80f1cSSuanming Mou 	if (!gc)
73964a80f1cSSuanming Mou 		return;
74064a80f1cSSuanming Mou 	/* Reset bmp. */
74164a80f1cSSuanming Mou 	bmp_num = mlx5_trunk_idx_offset_get(pool, gc->n_trunk_valid);
74264a80f1cSSuanming Mou 	mem_size = rte_bitmap_get_memory_footprint(bmp_num);
74364a80f1cSSuanming Mou 	pool->bmp_mem = pool->cfg.malloc(MLX5_MEM_ZERO, mem_size,
74464a80f1cSSuanming Mou 					 RTE_CACHE_LINE_SIZE, rte_socket_id());
74564a80f1cSSuanming Mou 	if (!pool->bmp_mem) {
74664a80f1cSSuanming Mou 		DRV_LOG(ERR, "Ipool bitmap mem allocate failed.\n");
74764a80f1cSSuanming Mou 		return;
74864a80f1cSSuanming Mou 	}
74964a80f1cSSuanming Mou 	ibmp = rte_bitmap_init_with_all_set(bmp_num, pool->bmp_mem, mem_size);
75064a80f1cSSuanming Mou 	if (!ibmp) {
75164a80f1cSSuanming Mou 		pool->cfg.free(pool->bmp_mem);
75264a80f1cSSuanming Mou 		pool->bmp_mem = NULL;
75364a80f1cSSuanming Mou 		DRV_LOG(ERR, "Ipool bitmap create failed.\n");
75464a80f1cSSuanming Mou 		return;
75564a80f1cSSuanming Mou 	}
75664a80f1cSSuanming Mou 	pool->ibmp = ibmp;
75764a80f1cSSuanming Mou 	/* Clear global cache. */
75864a80f1cSSuanming Mou 	for (i = 0; i < gc->len; i++)
75964a80f1cSSuanming Mou 		rte_bitmap_clear(ibmp, gc->idx[i] - 1);
76064a80f1cSSuanming Mou 	/* Clear core cache. */
76142f46339SSuanming Mou 	for (i = 0; i < RTE_MAX_LCORE + 1; i++) {
76264a80f1cSSuanming Mou 		struct mlx5_ipool_per_lcore *ilc = pool->cache[i];
76364a80f1cSSuanming Mou 
76464a80f1cSSuanming Mou 		if (!ilc)
76564a80f1cSSuanming Mou 			continue;
76664a80f1cSSuanming Mou 		for (j = 0; j < ilc->len; j++)
76764a80f1cSSuanming Mou 			rte_bitmap_clear(ibmp, ilc->idx[j] - 1);
76864a80f1cSSuanming Mou 	}
76964a80f1cSSuanming Mou }
77064a80f1cSSuanming Mou 
77164a80f1cSSuanming Mou static void *
77264a80f1cSSuanming Mou mlx5_ipool_get_next_cache(struct mlx5_indexed_pool *pool, uint32_t *pos)
77364a80f1cSSuanming Mou {
77464a80f1cSSuanming Mou 	struct rte_bitmap *ibmp;
77564a80f1cSSuanming Mou 	uint64_t slab = 0;
77664a80f1cSSuanming Mou 	uint32_t iidx = *pos;
77764a80f1cSSuanming Mou 
77864a80f1cSSuanming Mou 	ibmp = pool->ibmp;
77964a80f1cSSuanming Mou 	if (!ibmp || !rte_bitmap_scan(ibmp, &iidx, &slab)) {
78064a80f1cSSuanming Mou 		if (pool->bmp_mem) {
78164a80f1cSSuanming Mou 			pool->cfg.free(pool->bmp_mem);
78264a80f1cSSuanming Mou 			pool->bmp_mem = NULL;
78364a80f1cSSuanming Mou 			pool->ibmp = NULL;
78464a80f1cSSuanming Mou 		}
78564a80f1cSSuanming Mou 		return NULL;
78664a80f1cSSuanming Mou 	}
7873d4e27fdSDavid Marchand 	iidx += rte_ctz64(slab);
78864a80f1cSSuanming Mou 	rte_bitmap_clear(ibmp, iidx);
78964a80f1cSSuanming Mou 	iidx++;
79064a80f1cSSuanming Mou 	*pos = iidx;
79164a80f1cSSuanming Mou 	return mlx5_ipool_get_cache(pool, iidx);
79264a80f1cSSuanming Mou }
79364a80f1cSSuanming Mou 
79464a80f1cSSuanming Mou void *
79564a80f1cSSuanming Mou mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos)
79664a80f1cSSuanming Mou {
79764a80f1cSSuanming Mou 	uint32_t idx = *pos;
79864a80f1cSSuanming Mou 	void *entry;
79964a80f1cSSuanming Mou 
80064a80f1cSSuanming Mou 	if (pool->cfg.per_core_cache)
80164a80f1cSSuanming Mou 		return mlx5_ipool_get_next_cache(pool, pos);
80264a80f1cSSuanming Mou 	while (idx <= mlx5_trunk_idx_offset_get(pool, pool->n_trunk)) {
80364a80f1cSSuanming Mou 		entry = mlx5_ipool_get(pool, idx);
80464a80f1cSSuanming Mou 		if (entry) {
80564a80f1cSSuanming Mou 			*pos = idx;
80664a80f1cSSuanming Mou 			return entry;
80764a80f1cSSuanming Mou 		}
80864a80f1cSSuanming Mou 		idx++;
80964a80f1cSSuanming Mou 	}
81064a80f1cSSuanming Mou 	return NULL;
81164a80f1cSSuanming Mou }
81264a80f1cSSuanming Mou 
81389578504SMaayan Kashani int
814*d54e82e1SGregory Etelson mlx5_ipool_resize(struct mlx5_indexed_pool *pool, uint32_t num_entries,
815*d54e82e1SGregory Etelson 	struct rte_flow_error *error)
81689578504SMaayan Kashani {
817*d54e82e1SGregory Etelson 	if (num_entries == pool->cfg.max_idx)
818*d54e82e1SGregory Etelson 		return 0;
819*d54e82e1SGregory Etelson 	else if (num_entries < pool->cfg.max_idx)
820*d54e82e1SGregory Etelson 		return rte_flow_error_set(error, EINVAL,
821*d54e82e1SGregory Etelson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
822*d54e82e1SGregory Etelson 					  NULL, "cannot decrease pool size");
823*d54e82e1SGregory Etelson 	if (num_entries % pool->cfg.trunk_size)
824*d54e82e1SGregory Etelson 		return rte_flow_error_set(error, EINVAL,
825*d54e82e1SGregory Etelson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
826*d54e82e1SGregory Etelson 					  NULL, "number of entries in pool must be trunk size multiplication");
827*d54e82e1SGregory Etelson 	if (num_entries >= mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1))
828*d54e82e1SGregory Etelson 		return rte_flow_error_set(error, EINVAL,
829*d54e82e1SGregory Etelson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
830*d54e82e1SGregory Etelson 					  NULL, "requested number of entries exceeds pool limit");
83189578504SMaayan Kashani 	mlx5_ipool_lock(pool);
832*d54e82e1SGregory Etelson 	pool->cfg.max_idx = num_entries;
83389578504SMaayan Kashani 	mlx5_ipool_unlock(pool);
83489578504SMaayan Kashani 	return 0;
83589578504SMaayan Kashani }
83689578504SMaayan Kashani 
83764a80f1cSSuanming Mou void
838a3cf59f5SSuanming Mou mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
839a3cf59f5SSuanming Mou {
840a3cf59f5SSuanming Mou 	printf("Pool %s entry size %u, trunks %u, %d entry per trunk, "
841a3cf59f5SSuanming Mou 	       "total: %d\n",
842a3cf59f5SSuanming Mou 	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
843a3cf59f5SSuanming Mou 	       pool->cfg.trunk_size, pool->n_trunk_valid);
844a3cf59f5SSuanming Mou #ifdef POOL_DEBUG
845a3cf59f5SSuanming Mou 	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
846a3cf59f5SSuanming Mou 	       "available %u free %u\n",
847a3cf59f5SSuanming Mou 	       pool->cfg.type, pool->n_entry, pool->trunk_new,
848a3cf59f5SSuanming Mou 	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
849a3cf59f5SSuanming Mou #endif
850a3cf59f5SSuanming Mou }
851bd81eaebSSuanming Mou 
852bd81eaebSSuanming Mou struct mlx5_l3t_tbl *
853bd81eaebSSuanming Mou mlx5_l3t_create(enum mlx5_l3t_type type)
854bd81eaebSSuanming Mou {
855bd81eaebSSuanming Mou 	struct mlx5_l3t_tbl *tbl;
856bd81eaebSSuanming Mou 	struct mlx5_indexed_pool_config l3t_ip_cfg = {
857bd81eaebSSuanming Mou 		.trunk_size = 16,
858bd81eaebSSuanming Mou 		.grow_trunk = 6,
859bd81eaebSSuanming Mou 		.grow_shift = 1,
860bd81eaebSSuanming Mou 		.need_lock = 0,
861bd81eaebSSuanming Mou 		.release_mem_en = 1,
86283c2047cSSuanming Mou 		.malloc = mlx5_malloc,
86383c2047cSSuanming Mou 		.free = mlx5_free,
864bd81eaebSSuanming Mou 	};
865bd81eaebSSuanming Mou 
866bd81eaebSSuanming Mou 	if (type >= MLX5_L3T_TYPE_MAX) {
867bd81eaebSSuanming Mou 		rte_errno = EINVAL;
868bd81eaebSSuanming Mou 		return NULL;
869bd81eaebSSuanming Mou 	}
87083c2047cSSuanming Mou 	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
87183c2047cSSuanming Mou 			  SOCKET_ID_ANY);
872bd81eaebSSuanming Mou 	if (!tbl) {
873bd81eaebSSuanming Mou 		rte_errno = ENOMEM;
874bd81eaebSSuanming Mou 		return NULL;
875bd81eaebSSuanming Mou 	}
876bd81eaebSSuanming Mou 	tbl->type = type;
877bd81eaebSSuanming Mou 	switch (type) {
878bd81eaebSSuanming Mou 	case MLX5_L3T_TYPE_WORD:
8790796c7b1SSuanming Mou 		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_word);
880bd81eaebSSuanming Mou 		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_w";
881bd81eaebSSuanming Mou 		break;
882bd81eaebSSuanming Mou 	case MLX5_L3T_TYPE_DWORD:
8830796c7b1SSuanming Mou 		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword);
884bd81eaebSSuanming Mou 		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw";
885bd81eaebSSuanming Mou 		break;
886bd81eaebSSuanming Mou 	case MLX5_L3T_TYPE_QWORD:
8870796c7b1SSuanming Mou 		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword);
888bd81eaebSSuanming Mou 		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw";
889bd81eaebSSuanming Mou 		break;
890bd81eaebSSuanming Mou 	default:
8910796c7b1SSuanming Mou 		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr);
892bd81eaebSSuanming Mou 		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr";
893bd81eaebSSuanming Mou 		break;
894bd81eaebSSuanming Mou 	}
8950796c7b1SSuanming Mou 	rte_spinlock_init(&tbl->sl);
896bd81eaebSSuanming Mou 	tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
897bd81eaebSSuanming Mou 	if (!tbl->eip) {
898bd81eaebSSuanming Mou 		rte_errno = ENOMEM;
89983c2047cSSuanming Mou 		mlx5_free(tbl);
900bd81eaebSSuanming Mou 		tbl = NULL;
901bd81eaebSSuanming Mou 	}
902bd81eaebSSuanming Mou 	return tbl;
903bd81eaebSSuanming Mou }
904bd81eaebSSuanming Mou 
905bd81eaebSSuanming Mou void
906bd81eaebSSuanming Mou mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
907bd81eaebSSuanming Mou {
908bd81eaebSSuanming Mou 	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
909bd81eaebSSuanming Mou 	uint32_t i, j;
910bd81eaebSSuanming Mou 
911bd81eaebSSuanming Mou 	if (!tbl)
912bd81eaebSSuanming Mou 		return;
913bd81eaebSSuanming Mou 	g_tbl = tbl->tbl;
914bd81eaebSSuanming Mou 	if (g_tbl) {
915bd81eaebSSuanming Mou 		for (i = 0; i < MLX5_L3T_GT_SIZE; i++) {
916bd81eaebSSuanming Mou 			m_tbl = g_tbl->tbl[i];
917bd81eaebSSuanming Mou 			if (!m_tbl)
918bd81eaebSSuanming Mou 				continue;
919bd81eaebSSuanming Mou 			for (j = 0; j < MLX5_L3T_MT_SIZE; j++) {
920bd81eaebSSuanming Mou 				if (!m_tbl->tbl[j])
921bd81eaebSSuanming Mou 					continue;
922bd81eaebSSuanming Mou 				MLX5_ASSERT(!((struct mlx5_l3t_entry_word *)
923bd81eaebSSuanming Mou 					    m_tbl->tbl[j])->ref_cnt);
924bd81eaebSSuanming Mou 				mlx5_ipool_free(tbl->eip,
925bd81eaebSSuanming Mou 						((struct mlx5_l3t_entry_word *)
926bd81eaebSSuanming Mou 						m_tbl->tbl[j])->idx);
927bd81eaebSSuanming Mou 				m_tbl->tbl[j] = 0;
928bd81eaebSSuanming Mou 				if (!(--m_tbl->ref_cnt))
929bd81eaebSSuanming Mou 					break;
930bd81eaebSSuanming Mou 			}
931bd81eaebSSuanming Mou 			MLX5_ASSERT(!m_tbl->ref_cnt);
93283c2047cSSuanming Mou 			mlx5_free(g_tbl->tbl[i]);
933bd81eaebSSuanming Mou 			g_tbl->tbl[i] = 0;
934bd81eaebSSuanming Mou 			if (!(--g_tbl->ref_cnt))
935bd81eaebSSuanming Mou 				break;
936bd81eaebSSuanming Mou 		}
937bd81eaebSSuanming Mou 		MLX5_ASSERT(!g_tbl->ref_cnt);
93883c2047cSSuanming Mou 		mlx5_free(tbl->tbl);
939bd81eaebSSuanming Mou 		tbl->tbl = 0;
940bd81eaebSSuanming Mou 	}
941bd81eaebSSuanming Mou 	mlx5_ipool_destroy(tbl->eip);
94283c2047cSSuanming Mou 	mlx5_free(tbl);
943bd81eaebSSuanming Mou }
944bd81eaebSSuanming Mou 
9450796c7b1SSuanming Mou static int32_t
9460796c7b1SSuanming Mou __l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
947bd81eaebSSuanming Mou 		union mlx5_l3t_data *data)
948bd81eaebSSuanming Mou {
949bd81eaebSSuanming Mou 	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
9500796c7b1SSuanming Mou 	struct mlx5_l3t_entry_word *w_e_tbl;
9510796c7b1SSuanming Mou 	struct mlx5_l3t_entry_dword *dw_e_tbl;
9520796c7b1SSuanming Mou 	struct mlx5_l3t_entry_qword *qw_e_tbl;
9530796c7b1SSuanming Mou 	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
954bd81eaebSSuanming Mou 	void *e_tbl;
955bd81eaebSSuanming Mou 	uint32_t entry_idx;
956bd81eaebSSuanming Mou 
957bd81eaebSSuanming Mou 	g_tbl = tbl->tbl;
958bd81eaebSSuanming Mou 	if (!g_tbl)
959bd81eaebSSuanming Mou 		return -1;
960bd81eaebSSuanming Mou 	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
961bd81eaebSSuanming Mou 	if (!m_tbl)
962bd81eaebSSuanming Mou 		return -1;
963bd81eaebSSuanming Mou 	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
964bd81eaebSSuanming Mou 	if (!e_tbl)
965bd81eaebSSuanming Mou 		return -1;
966bd81eaebSSuanming Mou 	entry_idx = idx & MLX5_L3T_ET_MASK;
967bd81eaebSSuanming Mou 	switch (tbl->type) {
968bd81eaebSSuanming Mou 	case MLX5_L3T_TYPE_WORD:
9690796c7b1SSuanming Mou 		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
9700796c7b1SSuanming Mou 		data->word = w_e_tbl->entry[entry_idx].data;
9710796c7b1SSuanming Mou 		if (w_e_tbl->entry[entry_idx].data)
9720796c7b1SSuanming Mou 			w_e_tbl->entry[entry_idx].ref_cnt++;
973bd81eaebSSuanming Mou 		break;
974bd81eaebSSuanming Mou 	case MLX5_L3T_TYPE_DWORD:
9750796c7b1SSuanming Mou 		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
9760796c7b1SSuanming Mou 		data->dword = dw_e_tbl->entry[entry_idx].data;
9770796c7b1SSuanming Mou 		if (dw_e_tbl->entry[entry_idx].data)
9780796c7b1SSuanming Mou 			dw_e_tbl->entry[entry_idx].ref_cnt++;
979bd81eaebSSuanming Mou 		break;
980bd81eaebSSuanming Mou 	case MLX5_L3T_TYPE_QWORD:
9810796c7b1SSuanming Mou 		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
9820796c7b1SSuanming Mou 		data->qword = qw_e_tbl->entry[entry_idx].data;
9830796c7b1SSuanming Mou 		if (qw_e_tbl->entry[entry_idx].data)
9840796c7b1SSuanming Mou 			qw_e_tbl->entry[entry_idx].ref_cnt++;
985bd81eaebSSuanming Mou 		break;
986bd81eaebSSuanming Mou 	default:
9870796c7b1SSuanming Mou 		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
9880796c7b1SSuanming Mou 		data->ptr = ptr_e_tbl->entry[entry_idx].data;
9890796c7b1SSuanming Mou 		if (ptr_e_tbl->entry[entry_idx].data)
9900796c7b1SSuanming Mou 			ptr_e_tbl->entry[entry_idx].ref_cnt++;
991bd81eaebSSuanming Mou 		break;
992bd81eaebSSuanming Mou 	}
993bd81eaebSSuanming Mou 	return 0;
994bd81eaebSSuanming Mou }
995bd81eaebSSuanming Mou 
9960796c7b1SSuanming Mou int32_t
9970796c7b1SSuanming Mou mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
9980796c7b1SSuanming Mou 		   union mlx5_l3t_data *data)
9990796c7b1SSuanming Mou {
10000796c7b1SSuanming Mou 	int ret;
10010796c7b1SSuanming Mou 
10020796c7b1SSuanming Mou 	rte_spinlock_lock(&tbl->sl);
10030796c7b1SSuanming Mou 	ret = __l3t_get_entry(tbl, idx, data);
10040796c7b1SSuanming Mou 	rte_spinlock_unlock(&tbl->sl);
10050796c7b1SSuanming Mou 	return ret;
10060796c7b1SSuanming Mou }
10070796c7b1SSuanming Mou 
10080796c7b1SSuanming Mou int32_t
1009bd81eaebSSuanming Mou mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
1010bd81eaebSSuanming Mou {
1011bd81eaebSSuanming Mou 	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
1012bd81eaebSSuanming Mou 	struct mlx5_l3t_entry_word *w_e_tbl;
1013bd81eaebSSuanming Mou 	struct mlx5_l3t_entry_dword *dw_e_tbl;
1014bd81eaebSSuanming Mou 	struct mlx5_l3t_entry_qword *qw_e_tbl;
1015bd81eaebSSuanming Mou 	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
1016bd81eaebSSuanming Mou 	void *e_tbl;
1017bd81eaebSSuanming Mou 	uint32_t entry_idx;
1018bd81eaebSSuanming Mou 	uint64_t ref_cnt;
10190796c7b1SSuanming Mou 	int32_t ret = -1;
1020bd81eaebSSuanming Mou 
10210796c7b1SSuanming Mou 	rte_spinlock_lock(&tbl->sl);
1022bd81eaebSSuanming Mou 	g_tbl = tbl->tbl;
1023bd81eaebSSuanming Mou 	if (!g_tbl)
10240796c7b1SSuanming Mou 		goto out;
1025bd81eaebSSuanming Mou 	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
1026bd81eaebSSuanming Mou 	if (!m_tbl)
10270796c7b1SSuanming Mou 		goto out;
1028bd81eaebSSuanming Mou 	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
1029bd81eaebSSuanming Mou 	if (!e_tbl)
10300796c7b1SSuanming Mou 		goto out;
1031bd81eaebSSuanming Mou 	entry_idx = idx & MLX5_L3T_ET_MASK;
1032bd81eaebSSuanming Mou 	switch (tbl->type) {
1033bd81eaebSSuanming Mou 	case MLX5_L3T_TYPE_WORD:
1034bd81eaebSSuanming Mou 		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
10350796c7b1SSuanming Mou 		MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
10360796c7b1SSuanming Mou 		ret = --w_e_tbl->entry[entry_idx].ref_cnt;
10370796c7b1SSuanming Mou 		if (ret)
10380796c7b1SSuanming Mou 			goto out;
10390796c7b1SSuanming Mou 		w_e_tbl->entry[entry_idx].data = 0;
1040bd81eaebSSuanming Mou 		ref_cnt = --w_e_tbl->ref_cnt;
1041bd81eaebSSuanming Mou 		break;
1042bd81eaebSSuanming Mou 	case MLX5_L3T_TYPE_DWORD:
1043bd81eaebSSuanming Mou 		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
10440796c7b1SSuanming Mou 		MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
10450796c7b1SSuanming Mou 		ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
10460796c7b1SSuanming Mou 		if (ret)
10470796c7b1SSuanming Mou 			goto out;
10480796c7b1SSuanming Mou 		dw_e_tbl->entry[entry_idx].data = 0;
1049bd81eaebSSuanming Mou 		ref_cnt = --dw_e_tbl->ref_cnt;
1050bd81eaebSSuanming Mou 		break;
1051bd81eaebSSuanming Mou 	case MLX5_L3T_TYPE_QWORD:
1052bd81eaebSSuanming Mou 		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
10530796c7b1SSuanming Mou 		MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
10540796c7b1SSuanming Mou 		ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
10550796c7b1SSuanming Mou 		if (ret)
10560796c7b1SSuanming Mou 			goto out;
10570796c7b1SSuanming Mou 		qw_e_tbl->entry[entry_idx].data = 0;
1058bd81eaebSSuanming Mou 		ref_cnt = --qw_e_tbl->ref_cnt;
1059bd81eaebSSuanming Mou 		break;
1060bd81eaebSSuanming Mou 	default:
1061bd81eaebSSuanming Mou 		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
10620796c7b1SSuanming Mou 		MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
10630796c7b1SSuanming Mou 		ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
10640796c7b1SSuanming Mou 		if (ret)
10650796c7b1SSuanming Mou 			goto out;
10660796c7b1SSuanming Mou 		ptr_e_tbl->entry[entry_idx].data = NULL;
1067bd81eaebSSuanming Mou 		ref_cnt = --ptr_e_tbl->ref_cnt;
1068bd81eaebSSuanming Mou 		break;
1069bd81eaebSSuanming Mou 	}
1070bd81eaebSSuanming Mou 	if (!ref_cnt) {
1071bd81eaebSSuanming Mou 		mlx5_ipool_free(tbl->eip,
1072bd81eaebSSuanming Mou 				((struct mlx5_l3t_entry_word *)e_tbl)->idx);
1073bd81eaebSSuanming Mou 		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
1074bd81eaebSSuanming Mou 									NULL;
1075bd81eaebSSuanming Mou 		if (!(--m_tbl->ref_cnt)) {
107683c2047cSSuanming Mou 			mlx5_free(m_tbl);
1077bd81eaebSSuanming Mou 			g_tbl->tbl
1078bd81eaebSSuanming Mou 			[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] = NULL;
1079bd81eaebSSuanming Mou 			if (!(--g_tbl->ref_cnt)) {
108083c2047cSSuanming Mou 				mlx5_free(g_tbl);
1081bd81eaebSSuanming Mou 				tbl->tbl = 0;
1082bd81eaebSSuanming Mou 			}
1083bd81eaebSSuanming Mou 		}
1084bd81eaebSSuanming Mou 	}
10850796c7b1SSuanming Mou out:
10860796c7b1SSuanming Mou 	rte_spinlock_unlock(&tbl->sl);
10870796c7b1SSuanming Mou 	return ret;
1088bd81eaebSSuanming Mou }
1089bd81eaebSSuanming Mou 
10900796c7b1SSuanming Mou static int32_t
10910796c7b1SSuanming Mou __l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
1092bd81eaebSSuanming Mou 		union mlx5_l3t_data *data)
1093bd81eaebSSuanming Mou {
1094bd81eaebSSuanming Mou 	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
1095bd81eaebSSuanming Mou 	struct mlx5_l3t_entry_word *w_e_tbl;
1096bd81eaebSSuanming Mou 	struct mlx5_l3t_entry_dword *dw_e_tbl;
1097bd81eaebSSuanming Mou 	struct mlx5_l3t_entry_qword *qw_e_tbl;
1098bd81eaebSSuanming Mou 	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
1099bd81eaebSSuanming Mou 	void *e_tbl;
1100bd81eaebSSuanming Mou 	uint32_t entry_idx, tbl_idx = 0;
1101bd81eaebSSuanming Mou 
1102bd81eaebSSuanming Mou 	/* Check the global table, create it if empty. */
1103bd81eaebSSuanming Mou 	g_tbl = tbl->tbl;
1104bd81eaebSSuanming Mou 	if (!g_tbl) {
110583c2047cSSuanming Mou 		g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
110683c2047cSSuanming Mou 				    sizeof(struct mlx5_l3t_level_tbl) +
110783c2047cSSuanming Mou 				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
110883c2047cSSuanming Mou 				    SOCKET_ID_ANY);
1109bd81eaebSSuanming Mou 		if (!g_tbl) {
1110bd81eaebSSuanming Mou 			rte_errno = ENOMEM;
1111bd81eaebSSuanming Mou 			return -1;
1112bd81eaebSSuanming Mou 		}
1113bd81eaebSSuanming Mou 		tbl->tbl = g_tbl;
1114bd81eaebSSuanming Mou 	}
1115bd81eaebSSuanming Mou 	/*
1116bd81eaebSSuanming Mou 	 * Check the middle table, create it if empty. Ref_cnt will be
1117bd81eaebSSuanming Mou 	 * increased if new sub table created.
1118bd81eaebSSuanming Mou 	 */
1119bd81eaebSSuanming Mou 	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
1120bd81eaebSSuanming Mou 	if (!m_tbl) {
112183c2047cSSuanming Mou 		m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
112283c2047cSSuanming Mou 				    sizeof(struct mlx5_l3t_level_tbl) +
112383c2047cSSuanming Mou 				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
112483c2047cSSuanming Mou 				    SOCKET_ID_ANY);
1125bd81eaebSSuanming Mou 		if (!m_tbl) {
1126bd81eaebSSuanming Mou 			rte_errno = ENOMEM;
1127bd81eaebSSuanming Mou 			return -1;
1128bd81eaebSSuanming Mou 		}
1129bd81eaebSSuanming Mou 		g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
1130bd81eaebSSuanming Mou 									m_tbl;
1131bd81eaebSSuanming Mou 		g_tbl->ref_cnt++;
1132bd81eaebSSuanming Mou 	}
1133bd81eaebSSuanming Mou 	/*
1134bd81eaebSSuanming Mou 	 * Check the entry table, create it if empty. Ref_cnt will be
1135bd81eaebSSuanming Mou 	 * increased if new sub entry table created.
1136bd81eaebSSuanming Mou 	 */
1137bd81eaebSSuanming Mou 	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
1138bd81eaebSSuanming Mou 	if (!e_tbl) {
1139bd81eaebSSuanming Mou 		e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
1140bd81eaebSSuanming Mou 		if (!e_tbl) {
1141bd81eaebSSuanming Mou 			rte_errno = ENOMEM;
1142bd81eaebSSuanming Mou 			return -1;
1143bd81eaebSSuanming Mou 		}
1144bd81eaebSSuanming Mou 		((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
1145bd81eaebSSuanming Mou 		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
1146bd81eaebSSuanming Mou 									e_tbl;
1147bd81eaebSSuanming Mou 		m_tbl->ref_cnt++;
1148bd81eaebSSuanming Mou 	}
1149bd81eaebSSuanming Mou 	entry_idx = idx & MLX5_L3T_ET_MASK;
1150bd81eaebSSuanming Mou 	switch (tbl->type) {
1151bd81eaebSSuanming Mou 	case MLX5_L3T_TYPE_WORD:
1152bd81eaebSSuanming Mou 		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
11530796c7b1SSuanming Mou 		if (w_e_tbl->entry[entry_idx].data) {
11540796c7b1SSuanming Mou 			data->word = w_e_tbl->entry[entry_idx].data;
11550796c7b1SSuanming Mou 			w_e_tbl->entry[entry_idx].ref_cnt++;
11560796c7b1SSuanming Mou 			rte_errno = EEXIST;
11570796c7b1SSuanming Mou 			return -1;
11580796c7b1SSuanming Mou 		}
11590796c7b1SSuanming Mou 		w_e_tbl->entry[entry_idx].data = data->word;
11600796c7b1SSuanming Mou 		w_e_tbl->entry[entry_idx].ref_cnt = 1;
1161bd81eaebSSuanming Mou 		w_e_tbl->ref_cnt++;
1162bd81eaebSSuanming Mou 		break;
1163bd81eaebSSuanming Mou 	case MLX5_L3T_TYPE_DWORD:
1164bd81eaebSSuanming Mou 		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
11650796c7b1SSuanming Mou 		if (dw_e_tbl->entry[entry_idx].data) {
11660796c7b1SSuanming Mou 			data->dword = dw_e_tbl->entry[entry_idx].data;
11670796c7b1SSuanming Mou 			dw_e_tbl->entry[entry_idx].ref_cnt++;
11680796c7b1SSuanming Mou 			rte_errno = EEXIST;
11690796c7b1SSuanming Mou 			return -1;
11700796c7b1SSuanming Mou 		}
11710796c7b1SSuanming Mou 		dw_e_tbl->entry[entry_idx].data = data->dword;
11720796c7b1SSuanming Mou 		dw_e_tbl->entry[entry_idx].ref_cnt = 1;
1173bd81eaebSSuanming Mou 		dw_e_tbl->ref_cnt++;
1174bd81eaebSSuanming Mou 		break;
1175bd81eaebSSuanming Mou 	case MLX5_L3T_TYPE_QWORD:
1176bd81eaebSSuanming Mou 		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
11770796c7b1SSuanming Mou 		if (qw_e_tbl->entry[entry_idx].data) {
11780796c7b1SSuanming Mou 			data->qword = qw_e_tbl->entry[entry_idx].data;
11790796c7b1SSuanming Mou 			qw_e_tbl->entry[entry_idx].ref_cnt++;
11800796c7b1SSuanming Mou 			rte_errno = EEXIST;
11810796c7b1SSuanming Mou 			return -1;
11820796c7b1SSuanming Mou 		}
11830796c7b1SSuanming Mou 		qw_e_tbl->entry[entry_idx].data = data->qword;
11840796c7b1SSuanming Mou 		qw_e_tbl->entry[entry_idx].ref_cnt = 1;
1185bd81eaebSSuanming Mou 		qw_e_tbl->ref_cnt++;
1186bd81eaebSSuanming Mou 		break;
1187bd81eaebSSuanming Mou 	default:
1188bd81eaebSSuanming Mou 		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
11890796c7b1SSuanming Mou 		if (ptr_e_tbl->entry[entry_idx].data) {
11900796c7b1SSuanming Mou 			data->ptr = ptr_e_tbl->entry[entry_idx].data;
11910796c7b1SSuanming Mou 			ptr_e_tbl->entry[entry_idx].ref_cnt++;
11920796c7b1SSuanming Mou 			rte_errno = EEXIST;
11930796c7b1SSuanming Mou 			return -1;
11940796c7b1SSuanming Mou 		}
11950796c7b1SSuanming Mou 		ptr_e_tbl->entry[entry_idx].data = data->ptr;
11960796c7b1SSuanming Mou 		ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
1197bd81eaebSSuanming Mou 		ptr_e_tbl->ref_cnt++;
1198bd81eaebSSuanming Mou 		break;
1199bd81eaebSSuanming Mou 	}
1200bd81eaebSSuanming Mou 	return 0;
1201bd81eaebSSuanming Mou }
12020796c7b1SSuanming Mou 
12030796c7b1SSuanming Mou int32_t
12040796c7b1SSuanming Mou mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
12050796c7b1SSuanming Mou 		   union mlx5_l3t_data *data)
12060796c7b1SSuanming Mou {
12070796c7b1SSuanming Mou 	int ret;
12080796c7b1SSuanming Mou 
12090796c7b1SSuanming Mou 	rte_spinlock_lock(&tbl->sl);
12100796c7b1SSuanming Mou 	ret = __l3t_set_entry(tbl, idx, data);
12110796c7b1SSuanming Mou 	rte_spinlock_unlock(&tbl->sl);
12120796c7b1SSuanming Mou 	return ret;
12130796c7b1SSuanming Mou }
1214