xref: /dpdk/drivers/net/mlx5/mlx5_utils.c (revision 89813a522e68076e6f50ec18b075fa57cc5ae937)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2019 Mellanox Technologies, Ltd
3  */
4 
5 #include <rte_malloc.h>
6 #include <rte_hash_crc.h>
7 
8 #include <mlx5_malloc.h>
9 
10 #include "mlx5_utils.h"
11 
12 /********************* Hash List **********************/
13 
14 static struct mlx5_hlist_entry *
15 mlx5_hlist_default_create_cb(struct mlx5_hlist *h, uint64_t key __rte_unused,
16 			     void *ctx __rte_unused)
17 {
18 	return mlx5_malloc(MLX5_MEM_ZERO, h->entry_sz, 0, SOCKET_ID_ANY);
19 }
20 
21 static void
22 mlx5_hlist_default_remove_cb(struct mlx5_hlist *h __rte_unused,
23 			     struct mlx5_hlist_entry *entry)
24 {
25 	mlx5_free(entry);
26 }
27 
28 struct mlx5_hlist *
29 mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size,
30 		  uint32_t flags, mlx5_hlist_create_cb cb_create,
31 		  mlx5_hlist_match_cb cb_match, mlx5_hlist_remove_cb cb_remove)
32 {
33 	struct mlx5_hlist *h;
34 	uint32_t act_size;
35 	uint32_t alloc_size;
36 	uint32_t i;
37 
38 	if (!size || !cb_match || (!cb_create ^ !cb_remove))
39 		return NULL;
40 	/* Align the size to the next power of 2; a 32-bit integer is enough for now. */
41 	if (!rte_is_power_of_2(size)) {
42 		act_size = rte_align32pow2(size);
43 		DRV_LOG(DEBUG, "Size 0x%" PRIX32 " is not a power of 2, "
44 			"will be aligned to 0x%" PRIX32 ".", size, act_size);
45 	} else {
46 		act_size = size;
47 	}
48 	alloc_size = sizeof(struct mlx5_hlist) +
49 		     sizeof(struct mlx5_hlist_bucket) * act_size;
50 	/* The memory is zeroed on allocation, so the list heads need no explicit init. */
51 	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
52 			SOCKET_ID_ANY);
53 	if (!h) {
54 		DRV_LOG(ERR, "No memory for hash list %s creation",
55 			name ? name : "None");
56 		return NULL;
57 	}
58 	if (name)
59 		snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name);
60 	h->table_sz = act_size;
61 	h->mask = act_size - 1;
62 	h->entry_sz = entry_size;
63 	h->direct_key = !!(flags & MLX5_HLIST_DIRECT_KEY);
64 	h->write_most = !!(flags & MLX5_HLIST_WRITE_MOST);
65 	h->cb_create = cb_create ? cb_create : mlx5_hlist_default_create_cb;
66 	h->cb_match = cb_match;
67 	h->cb_remove = cb_remove ? cb_remove : mlx5_hlist_default_remove_cb;
68 	for (i = 0; i < act_size; i++)
69 		rte_rwlock_init(&h->buckets[i].lock);
70 	DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.",
71 		h->name, act_size);
72 	return h;
73 }
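
/*
 * Illustrative sketch (not part of the original file): a minimal user of
 * the hash list. The example_* names are hypothetical and the callback
 * signatures are inferred from the default callbacks and call sites in this
 * file; the match callback must return 0 on a match. Passing NULL for both
 * cb_create and cb_remove would select the default callbacks instead, which
 * simply allocate and free entry_size zeroed bytes.
 */
struct example_hentry {
	struct mlx5_hlist_entry entry; /* Placed first, as recommended. */
	uint64_t key;
};

static struct mlx5_hlist_entry *
example_hentry_create(struct mlx5_hlist *h __rte_unused, uint64_t key,
		      void *ctx __rte_unused)
{
	struct example_hentry *e;

	e = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*e), 0, SOCKET_ID_ANY);
	if (!e)
		return NULL;
	e->key = key;
	return &e->entry;
}

static int
example_hentry_match(struct mlx5_hlist *h __rte_unused,
		     struct mlx5_hlist_entry *entry, uint64_t key,
		     void *ctx __rte_unused)
{
	return ((struct example_hentry *)entry)->key != key;
}

static void
example_hentry_remove(struct mlx5_hlist *h __rte_unused,
		      struct mlx5_hlist_entry *entry)
{
	mlx5_free(entry);
}

static struct mlx5_hlist *
example_hlist_create(void)
{
	return mlx5_hlist_create("example", 64, sizeof(struct example_hentry),
				 0, example_hentry_create,
				 example_hentry_match, example_hentry_remove);
}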
74 
75 static struct mlx5_hlist_entry *
76 __hlist_lookup(struct mlx5_hlist *h, uint64_t key, uint32_t idx,
77 	       void *ctx, bool reuse)
78 {
79 	struct mlx5_hlist_head *first;
80 	struct mlx5_hlist_entry *node;
81 
82 	MLX5_ASSERT(h);
83 	first = &h->buckets[idx].head;
84 	LIST_FOREACH(node, first, next) {
85 		if (!h->cb_match(h, node, key, ctx)) {
86 			if (reuse) {
87 				__atomic_add_fetch(&node->ref_cnt, 1,
88 						   __ATOMIC_RELAXED);
89 				DRV_LOG(DEBUG, "Hash list %s entry %p "
90 					"reuse: %u.",
91 					h->name, (void *)node, node->ref_cnt);
92 			}
93 			break;
94 		}
95 	}
96 	return node;
97 }
98 
99 static struct mlx5_hlist_entry *
100 hlist_lookup(struct mlx5_hlist *h, uint64_t key, uint32_t idx,
101 	     void *ctx, bool reuse)
102 {
103 	struct mlx5_hlist_entry *node;
104 
105 	MLX5_ASSERT(h);
106 	rte_rwlock_read_lock(&h->buckets[idx].lock);
107 	node = __hlist_lookup(h, key, idx, ctx, reuse);
108 	rte_rwlock_read_unlock(&h->buckets[idx].lock);
109 	return node;
110 }
111 
112 struct mlx5_hlist_entry *
113 mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
114 {
115 	uint32_t idx;
116 
117 	if (h->direct_key)
118 		idx = (uint32_t)(key & h->mask);
119 	else
120 		idx = rte_hash_crc_8byte(key, 0) & h->mask;
121 	return hlist_lookup(h, key, idx, ctx, false);
122 }
123 
124 struct mlx5_hlist_entry*
125 mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
126 {
127 	uint32_t idx;
128 	struct mlx5_hlist_head *first;
129 	struct mlx5_hlist_bucket *b;
130 	struct mlx5_hlist_entry *entry;
131 	uint32_t prev_gen_cnt = 0;
132 
133 	MLX5_ASSERT(h);
134 	if (h->direct_key)
135 		idx = (uint32_t)(key & h->mask);
136 	else
137 		idx = rte_hash_crc_8byte(key, 0) & h->mask;
138 	b = &h->buckets[idx];
139 	/* Use write lock directly for write-most list. */
140 	if (!h->write_most) {
141 		prev_gen_cnt = __atomic_load_n(&b->gen_cnt, __ATOMIC_ACQUIRE);
142 		entry = hlist_lookup(h, key, idx, ctx, true);
143 		if (entry)
144 			return entry;
145 	}
146 	rte_rwlock_write_lock(&b->lock);
147 	/* Check if the list was changed by another thread. */
148 	if (h->write_most ||
149 	    prev_gen_cnt != __atomic_load_n(&b->gen_cnt, __ATOMIC_ACQUIRE)) {
150 		entry = __hlist_lookup(h, key, idx, ctx, true);
151 		if (entry)
152 			goto done;
153 	}
154 	first = &b->head;
155 	entry = h->cb_create(h, key, ctx);
156 	if (!entry) {
157 		rte_errno = ENOMEM;
158 		DRV_LOG(DEBUG, "Can't allocate hash list %s entry.", h->name);
159 		goto done;
160 	}
161 	entry->idx = idx;
162 	entry->ref_cnt = 1;
163 	LIST_INSERT_HEAD(first, entry, next);
164 	__atomic_add_fetch(&b->gen_cnt, 1, __ATOMIC_ACQ_REL);
165 	DRV_LOG(DEBUG, "Hash list %s entry %p new: %u.",
166 		h->name, (void *)entry, entry->ref_cnt);
167 done:
168 	rte_rwlock_write_unlock(&b->lock);
169 	return entry;
170 }
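
/*
 * Editorial note (not part of the original file): mlx5_hlist_register()
 * above is optimistic for read-most lists. It first looks up under the
 * read lock while remembering the bucket generation counter, and repeats
 * the lookup under the write lock only when the counter shows that another
 * thread modified the bucket in between. Lists created with
 * MLX5_HLIST_WRITE_MOST skip the read-locked pass and take the write lock
 * directly.
 */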
171 
172 int
173 mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
174 {
175 	uint32_t idx = entry->idx;
176 
177 	rte_rwlock_write_lock(&h->buckets[idx].lock);
178 	MLX5_ASSERT(entry && entry->ref_cnt && entry->next.le_prev);
179 	DRV_LOG(DEBUG, "Hash list %s entry %p deref: %u.",
180 		h->name, (void *)entry, entry->ref_cnt);
181 	if (--entry->ref_cnt) {
182 		rte_rwlock_write_unlock(&h->buckets[idx].lock);
183 		return 1;
184 	}
185 	LIST_REMOVE(entry, next);
186 	/* Set to NULL to prevent the entry from being removed more than once. */
187 	entry->next.le_prev = NULL;
188 	h->cb_remove(h, entry);
189 	rte_rwlock_write_unlock(&h->buckets[idx].lock);
190 	DRV_LOG(DEBUG, "Hash list %s entry %p removed.",
191 		h->name, (void *)entry);
192 	return 0;
193 }
194 
195 void
196 mlx5_hlist_destroy(struct mlx5_hlist *h)
197 {
198 	uint32_t idx;
199 	struct mlx5_hlist_entry *entry;
200 
201 	MLX5_ASSERT(h);
202 	for (idx = 0; idx < h->table_sz; ++idx) {
203 		/* No LIST_FOREACH_SAFE, using while instead. */
204 		while (!LIST_EMPTY(&h->buckets[idx].head)) {
205 			entry = LIST_FIRST(&h->buckets[idx].head);
206 			LIST_REMOVE(entry, next);
207 			/*
208 			 * The user owns the whole element that contains the
209 			 * data entry, so cleaning it up and freeing it via
210 			 * the remove callback is the user's responsibility:
211 			 * the hlist entry may not be at the beginning of the
212 			 * element (placing it first is only recommended).
213 			 * Without a user callback the default free is used.
214 			 */
215 			h->cb_remove(h, entry);
216 		}
217 	}
218 	mlx5_free(h);
219 }
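
/*
 * Illustrative sketch (not part of the original file): the typical
 * register/unregister life cycle of a hash list entry, reusing the
 * hypothetical example_* helpers sketched after mlx5_hlist_create() above.
 * mlx5_hlist_register() either takes a reference on an existing entry or
 * creates a new one; mlx5_hlist_unregister() drops the reference and frees
 * the entry through the remove callback once the count reaches zero.
 */
static void
example_hlist_usage(struct mlx5_hlist *h, uint64_t key)
{
	struct mlx5_hlist_entry *e;

	e = mlx5_hlist_register(h, key, NULL);
	if (!e)
		return;
	/* ... use the entry, e.g. cast back to struct example_hentry ... */
	mlx5_hlist_unregister(h, e);
}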
220 
221 /********************* Cache list ************************/
222 
223 static struct mlx5_cache_entry *
224 mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
225 			     struct mlx5_cache_entry *entry __rte_unused,
226 			     void *ctx __rte_unused)
227 {
228 	return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
229 }
230 
231 static void
232 mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
233 			     struct mlx5_cache_entry *entry)
234 {
235 	mlx5_free(entry);
236 }
237 
238 int
239 mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
240 		     uint32_t entry_size, void *ctx,
241 		     mlx5_cache_create_cb cb_create,
242 		     mlx5_cache_match_cb cb_match,
243 		     mlx5_cache_remove_cb cb_remove)
244 {
245 	MLX5_ASSERT(list);
246 	if (!cb_match || (!cb_create ^ !cb_remove))
247 		return -1;
248 	if (name)
249 		snprintf(list->name, sizeof(list->name), "%s", name);
250 	list->entry_sz = entry_size;
251 	list->ctx = ctx;
252 	list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
253 	list->cb_match = cb_match;
254 	list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
255 	rte_rwlock_init(&list->lock);
256 	DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
257 	LIST_INIT(&list->head);
258 	return 0;
259 }
260 
261 static struct mlx5_cache_entry *
262 __cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
263 {
264 	struct mlx5_cache_entry *entry;
265 
266 	LIST_FOREACH(entry, &list->head, next) {
267 		if (list->cb_match(list, entry, ctx))
268 			continue;
269 		if (reuse) {
270 			__atomic_add_fetch(&entry->ref_cnt, 1,
271 					   __ATOMIC_RELAXED);
272 			DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
273 				list->name, (void *)entry, entry->ref_cnt);
274 		}
275 		break;
276 	}
277 	return entry;
278 }
279 
280 static struct mlx5_cache_entry *
281 cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
282 {
283 	struct mlx5_cache_entry *entry;
284 
285 	rte_rwlock_read_lock(&list->lock);
286 	entry = __cache_lookup(list, ctx, reuse);
287 	rte_rwlock_read_unlock(&list->lock);
288 	return entry;
289 }
290 
291 struct mlx5_cache_entry *
292 mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
293 {
294 	return cache_lookup(list, ctx, false);
295 }
296 
297 struct mlx5_cache_entry *
298 mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
299 {
300 	struct mlx5_cache_entry *entry;
301 	uint32_t prev_gen_cnt = 0;
302 
303 	MLX5_ASSERT(list);
304 	prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
305 	/* Lookup with read lock, reuse if found. */
306 	entry = cache_lookup(list, ctx, true);
307 	if (entry)
308 		return entry;
309 	/* Not found, append under the write lock, blocking other readers. */
310 	rte_rwlock_write_lock(&list->lock);
311 	/* If the list was changed before taking the lock, search again. */
312 	if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
313 		/* Lookup and reuse w/o read lock. */
314 		entry = __cache_lookup(list, ctx, true);
315 		if (entry)
316 			goto done;
317 	}
318 	entry = list->cb_create(list, entry, ctx);
319 	if (!entry) {
320 		DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
321 			list->name, (void *)entry);
322 		goto done;
323 	}
324 	entry->ref_cnt = 1;
325 	LIST_INSERT_HEAD(&list->head, entry, next);
326 	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
327 	__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
328 	DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
329 		list->name, (void *)entry, entry->ref_cnt);
330 done:
331 	rte_rwlock_write_unlock(&list->lock);
332 	return entry;
333 }
334 
335 int
336 mlx5_cache_unregister(struct mlx5_cache_list *list,
337 		      struct mlx5_cache_entry *entry)
338 {
339 	rte_rwlock_write_lock(&list->lock);
340 	MLX5_ASSERT(entry && entry->next.le_prev);
341 	DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
342 		list->name, (void *)entry, entry->ref_cnt);
343 	if (--entry->ref_cnt) {
344 		rte_rwlock_write_unlock(&list->lock);
345 		return 1;
346 	}
347 	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
348 	__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
349 	LIST_REMOVE(entry, next);
350 	list->cb_remove(list, entry);
351 	rte_rwlock_write_unlock(&list->lock);
352 	DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
353 		list->name, (void *)entry);
354 	return 0;
355 }
356 
357 void
358 mlx5_cache_list_destroy(struct mlx5_cache_list *list)
359 {
360 	struct mlx5_cache_entry *entry;
361 
362 	MLX5_ASSERT(list);
363 	/* No LIST_FOREACH_SAFE, using while instead. */
364 	while (!LIST_EMPTY(&list->head)) {
365 		entry = LIST_FIRST(&list->head);
366 		LIST_REMOVE(entry, next);
367 		list->cb_remove(list, entry);
368 		DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
369 			list->name, (void *)entry);
370 	}
371 	memset(list, 0, sizeof(*list));
372 }
373 
374 uint32_t
375 mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
376 {
377 	MLX5_ASSERT(list);
378 	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
379 }
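
/*
 * Illustrative sketch (not part of the original file): minimal usage of the
 * cache list. The example_* names are hypothetical and the match callback
 * signature is inferred from the call sites in this file; it must return 0
 * on a match. With NULL create/remove callbacks, the defaults allocate and
 * free entry_size zeroed bytes.
 */
struct example_centry {
	struct mlx5_cache_entry entry; /* Placed first, as for hlist. */
	uint32_t tag;
};

static int
example_centry_match(struct mlx5_cache_list *list __rte_unused,
		     struct mlx5_cache_entry *entry, void *ctx)
{
	return ((struct example_centry *)entry)->tag != *(uint32_t *)ctx;
}

static int
example_cache_usage(void)
{
	struct mlx5_cache_list list;
	struct mlx5_cache_entry *e;
	uint32_t tag = 7;

	if (mlx5_cache_list_init(&list, "example",
				 sizeof(struct example_centry),
				 NULL, NULL, example_centry_match, NULL))
		return -1;
	e = mlx5_cache_register(&list, &tag);
	if (e) {
		/* The default create callback only zero-allocates. */
		((struct example_centry *)e)->tag = tag;
		mlx5_cache_unregister(&list, e);
	}
	mlx5_cache_list_destroy(&list);
	return 0;
}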
380 
381 /********************* Indexed pool **********************/
382 
383 static inline void
384 mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
385 {
386 	if (pool->cfg.need_lock)
387 		rte_spinlock_lock(&pool->lock);
388 }
389 
390 static inline void
391 mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
392 {
393 	if (pool->cfg.need_lock)
394 		rte_spinlock_unlock(&pool->lock);
395 }
396 
397 static inline uint32_t
398 mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
399 {
400 	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
401 	uint32_t trunk_idx = 0;
402 	uint32_t i;
403 
404 	if (!cfg->grow_trunk)
405 		return entry_idx / cfg->trunk_size;
406 	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
407 		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
408 			    (cfg->trunk_size << (cfg->grow_shift *
409 			    cfg->grow_trunk)) + cfg->grow_trunk;
410 	} else {
411 		for (i = 0; i < cfg->grow_trunk; i++) {
412 			if (entry_idx < pool->grow_tbl[i])
413 				break;
414 		}
415 		trunk_idx = i;
416 	}
417 	return trunk_idx;
418 }
419 
420 static inline uint32_t
421 mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
422 {
423 	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
424 
425 	return cfg->trunk_size << (cfg->grow_shift *
426 	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
427 }
428 
429 static inline uint32_t
430 mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
431 {
432 	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
433 	uint32_t offset = 0;
434 
435 	if (!trunk_idx)
436 		return 0;
437 	if (!cfg->grow_trunk)
438 		return cfg->trunk_size * trunk_idx;
439 	if (trunk_idx < cfg->grow_trunk)
440 		offset = pool->grow_tbl[trunk_idx - 1];
441 	else
442 		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
443 			 (cfg->trunk_size << (cfg->grow_shift *
444 			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
445 	return offset;
446 }
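
/*
 * Worked example (not part of the original file) of the growth logic above,
 * assuming trunk_size = 16, grow_shift = 1 and grow_trunk = 3:
 *
 *   trunk index :   0    1    2    3    4  ...
 *   trunk size  :  16   32   64  128  128  ...  (doubling stops after
 *   grow_tbl[]  :  16   48  112                  grow_trunk trunks)
 *   entry offset:   0   16   48  112  240  ...
 *
 * mlx5_trunk_idx_get() inverts this mapping: an entry index below
 * grow_tbl[grow_trunk - 1] is looked up in grow_tbl[], anything at or above
 * it belongs to one of the fixed-size trunks that follow.
 */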
447 
448 struct mlx5_indexed_pool *
449 mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
450 {
451 	struct mlx5_indexed_pool *pool;
452 	uint32_t i;
453 
454 	if (!cfg || (!cfg->malloc ^ !cfg->free) ||
455 	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
456 	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
457 		return NULL;
458 	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
459 			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
460 			   SOCKET_ID_ANY);
461 	if (!pool)
462 		return NULL;
463 	pool->cfg = *cfg;
464 	if (!pool->cfg.trunk_size)
465 		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
466 	if (!cfg->malloc && !cfg->free) {
467 		pool->cfg.malloc = mlx5_malloc;
468 		pool->cfg.free = mlx5_free;
469 	}
470 	pool->free_list = TRUNK_INVALID;
471 	if (pool->cfg.need_lock)
472 		rte_spinlock_init(&pool->lock);
473 	/*
474 	 * Initialize the grow-trunk lookup table so that the entry index
475 	 * offset of each trunk can be found quickly.
476 	 */
477 	for (i = 0; i < cfg->grow_trunk; i++) {
478 		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
479 		if (i > 0)
480 			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
481 	}
482 	return pool;
483 }
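
/*
 * Illustrative sketch (not part of the original file): creating an indexed
 * pool and exercising the allocation API. The configuration values and the
 * example_item type are hypothetical; only fields used elsewhere in this
 * file are set. Returned indexes are 1-based, so 0 can mean "no entry".
 */
struct example_item {
	uint32_t data;
};

static void
example_ipool_usage(void)
{
	struct mlx5_indexed_pool_config cfg = {
		.size = sizeof(struct example_item),
		.trunk_size = 64,	/* Must be a power of 2. */
		.need_lock = 1,		/* Serialize with a spinlock. */
		.release_mem_en = 1,	/* Free a trunk when it empties. */
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "example_ipool",
	};
	struct mlx5_indexed_pool *pool;
	struct example_item *item;
	uint32_t idx;

	pool = mlx5_ipool_create(&cfg);
	if (!pool)
		return;
	item = mlx5_ipool_zmalloc(pool, &idx);
	if (item) {
		/* The index can be stored and resolved back later. */
		MLX5_ASSERT(mlx5_ipool_get(pool, idx) == item);
		mlx5_ipool_free(pool, idx);
	}
	mlx5_ipool_destroy(pool);
}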
484 
485 static int
486 mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
487 {
488 	struct mlx5_indexed_trunk *trunk;
489 	struct mlx5_indexed_trunk **trunk_tmp;
490 	struct mlx5_indexed_trunk **p;
491 	size_t trunk_size = 0;
492 	size_t data_size;
493 	size_t bmp_size;
494 	uint32_t idx;
495 
496 	if (pool->n_trunk_valid == TRUNK_MAX_IDX)
497 		return -ENOMEM;
498 	if (pool->n_trunk_valid == pool->n_trunk) {
499 		/* No free trunk slot left, expand the trunk pointer array. */
500 		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
501 			     RTE_CACHE_LINE_SIZE / sizeof(void *);
502 
503 		p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
504 				     sizeof(struct mlx5_indexed_trunk *),
505 				     RTE_CACHE_LINE_SIZE, rte_socket_id());
506 		if (!p)
507 			return -ENOMEM;
508 		if (pool->trunks)
509 			memcpy(p, pool->trunks, pool->n_trunk_valid *
510 			       sizeof(struct mlx5_indexed_trunk *));
511 		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
512 		       n_grow * sizeof(void *));
513 		trunk_tmp = pool->trunks;
514 		pool->trunks = p;
515 		if (trunk_tmp)
516 			pool->cfg.free(trunk_tmp);
517 		pool->n_trunk += n_grow;
518 	}
519 	if (!pool->cfg.release_mem_en) {
520 		idx = pool->n_trunk_valid;
521 	} else {
522 		/* Find the first available slot in the trunk list. */
523 		for (idx = 0; idx < pool->n_trunk; idx++)
524 			if (pool->trunks[idx] == NULL)
525 				break;
526 	}
527 	trunk_size += sizeof(*trunk);
528 	data_size = mlx5_trunk_size_get(pool, idx);
529 	bmp_size = rte_bitmap_get_memory_footprint(data_size);
530 	/* rte_bitmap requires the memory to be cache-line aligned. */
531 	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
532 	trunk_size += bmp_size;
533 	trunk = pool->cfg.malloc(0, trunk_size,
534 				 RTE_CACHE_LINE_SIZE, rte_socket_id());
535 	if (!trunk)
536 		return -ENOMEM;
537 	pool->trunks[idx] = trunk;
538 	trunk->idx = idx;
539 	trunk->free = data_size;
540 	trunk->prev = TRUNK_INVALID;
541 	trunk->next = TRUNK_INVALID;
542 	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
543 	pool->free_list = idx;
544 	/* Mark all entries as available. */
545 	trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
546 		     [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
547 		     bmp_size);
548 	MLX5_ASSERT(trunk->bmp);
549 	pool->n_trunk_valid++;
550 #ifdef POOL_DEBUG
551 	pool->trunk_new++;
552 	pool->trunk_avail++;
553 #endif
554 	return 0;
555 }
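
/*
 * Editorial illustration (not part of the original file): the memory block
 * allocated by mlx5_ipool_grow() above is laid out as follows.
 *
 *   +------------------------------+
 *   | struct mlx5_indexed_trunk    |  header, data[] is its trailing array
 *   +------------------------------+
 *   | data[0] ... data[N - 1]      |  N = mlx5_trunk_size_get(pool, idx)
 *   |                              |  entries of pool->cfg.size bytes,
 *   |                              |  rounded up to a full cache line
 *   +------------------------------+
 *   | rte_bitmap                   |  one bit per entry, all set (free)
 *   +------------------------------+
 */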
556 
557 void *
558 mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
559 {
560 	struct mlx5_indexed_trunk *trunk;
561 	uint64_t slab = 0;
562 	uint32_t iidx = 0;
563 	void *p;
564 
565 	mlx5_ipool_lock(pool);
566 	if (pool->free_list == TRUNK_INVALID) {
567 		/* If no trunk is available, grow a new one. */
568 		if (mlx5_ipool_grow(pool)) {
569 			mlx5_ipool_unlock(pool);
570 			return NULL;
571 		}
572 	}
573 	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
574 	trunk = pool->trunks[pool->free_list];
575 	MLX5_ASSERT(trunk->free);
576 	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
577 		mlx5_ipool_unlock(pool);
578 		return NULL;
579 	}
580 	MLX5_ASSERT(slab);
581 	iidx += __builtin_ctzll(slab);
582 	MLX5_ASSERT(iidx != UINT32_MAX);
583 	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
584 	rte_bitmap_clear(trunk->bmp, iidx);
585 	p = &trunk->data[iidx * pool->cfg.size];
586 	/*
587 	 * The ipool index should grow continuously from small to big:
588 	 * some features, such as metering, accept only a limited number of
589 	 * index bits, so a random index with the MSB set may be rejected.
590 	 */
591 	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
592 	iidx += 1; /* non-zero index. */
593 	trunk->free--;
594 #ifdef POOL_DEBUG
595 	pool->n_entry++;
596 #endif
597 	if (!trunk->free) {
598 		/* The trunk is full, remove it from the free list. */
599 		MLX5_ASSERT(pool->free_list == trunk->idx);
600 		pool->free_list = trunk->next;
601 		if (trunk->next != TRUNK_INVALID)
602 			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
603 		trunk->prev = TRUNK_INVALID;
604 		trunk->next = TRUNK_INVALID;
605 #ifdef POOL_DEBUG
606 		pool->trunk_empty++;
607 		pool->trunk_avail--;
608 #endif
609 	}
610 	*idx = iidx;
611 	mlx5_ipool_unlock(pool);
612 	return p;
613 }
614 
615 void *
616 mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
617 {
618 	void *entry = mlx5_ipool_malloc(pool, idx);
619 
620 	if (entry && pool->cfg.size)
621 		memset(entry, 0, pool->cfg.size);
622 	return entry;
623 }
624 
625 void
626 mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
627 {
628 	struct mlx5_indexed_trunk *trunk;
629 	uint32_t trunk_idx;
630 	uint32_t entry_idx;
631 
632 	if (!idx)
633 		return;
634 	idx -= 1;
635 	mlx5_ipool_lock(pool);
636 	trunk_idx = mlx5_trunk_idx_get(pool, idx);
637 	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
638 	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
639 		goto out;
640 	trunk = pool->trunks[trunk_idx];
641 	if (!trunk)
642 		goto out;
643 	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
644 	if (trunk_idx != trunk->idx ||
645 	    rte_bitmap_get(trunk->bmp, entry_idx))
646 		goto out;
647 	rte_bitmap_set(trunk->bmp, entry_idx);
648 	trunk->free++;
649 	if (pool->cfg.release_mem_en &&
650 	    trunk->free == mlx5_trunk_size_get(pool, trunk->idx)) {
651 		if (pool->free_list == trunk->idx)
652 			pool->free_list = trunk->next;
653 		if (trunk->next != TRUNK_INVALID)
654 			pool->trunks[trunk->next]->prev = trunk->prev;
655 		if (trunk->prev != TRUNK_INVALID)
656 			pool->trunks[trunk->prev]->next = trunk->next;
657 		pool->cfg.free(trunk);
658 		pool->trunks[trunk_idx] = NULL;
659 		pool->n_trunk_valid--;
660 #ifdef POOL_DEBUG
661 		pool->trunk_avail--;
662 		pool->trunk_free++;
663 #endif
664 		if (pool->n_trunk_valid == 0) {
665 			pool->cfg.free(pool->trunks);
666 			pool->trunks = NULL;
667 			pool->n_trunk = 0;
668 		}
669 	} else if (trunk->free == 1) {
670 		/* Put the trunk at the head of the free list. */
671 		MLX5_ASSERT(pool->free_list != trunk->idx);
672 		trunk->next = pool->free_list;
673 		trunk->prev = TRUNK_INVALID;
674 		if (pool->free_list != TRUNK_INVALID)
675 			pool->trunks[pool->free_list]->prev = trunk->idx;
676 		pool->free_list = trunk->idx;
677 #ifdef POOL_DEBUG
678 		pool->trunk_empty--;
679 		pool->trunk_avail++;
680 #endif
681 	}
682 #ifdef POOL_DEBUG
683 	pool->n_entry--;
684 #endif
685 out:
686 	mlx5_ipool_unlock(pool);
687 }
688 
689 void *
690 mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
691 {
692 	struct mlx5_indexed_trunk *trunk;
693 	void *p = NULL;
694 	uint32_t trunk_idx;
695 	uint32_t entry_idx;
696 
697 	if (!idx)
698 		return NULL;
699 	idx -= 1;
700 	mlx5_ipool_lock(pool);
701 	trunk_idx = mlx5_trunk_idx_get(pool, idx);
702 	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
703 	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
704 		goto out;
705 	trunk = pool->trunks[trunk_idx];
706 	if (!trunk)
707 		goto out;
708 	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
709 	if (trunk_idx != trunk->idx ||
710 	    rte_bitmap_get(trunk->bmp, entry_idx))
711 		goto out;
712 	p = &trunk->data[entry_idx * pool->cfg.size];
713 out:
714 	mlx5_ipool_unlock(pool);
715 	return p;
716 }
717 
718 int
719 mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
720 {
721 	struct mlx5_indexed_trunk **trunks;
722 	uint32_t i;
723 
724 	MLX5_ASSERT(pool);
725 	mlx5_ipool_lock(pool);
726 	trunks = pool->trunks;
727 	for (i = 0; i < pool->n_trunk; i++) {
728 		if (trunks[i])
729 			pool->cfg.free(trunks[i]);
730 	}
731 	if (pool->trunks)
732 		pool->cfg.free(pool->trunks);
733 	mlx5_ipool_unlock(pool);
734 	mlx5_free(pool);
735 	return 0;
736 }
737 
738 void
739 mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
740 {
741 	printf("Pool %s entry size %u, trunks %u, %u entries per trunk, "
742 	       "total: %u\n",
743 	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
744 	       pool->cfg.trunk_size, pool->n_trunk_valid);
745 #ifdef POOL_DEBUG
746 	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
747 	       "available %u free %u\n",
748 	       pool->cfg.type, pool->n_entry, pool->trunk_new,
749 	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
750 #endif
751 }
752 
753 struct mlx5_l3t_tbl *
754 mlx5_l3t_create(enum mlx5_l3t_type type)
755 {
756 	struct mlx5_l3t_tbl *tbl;
757 	struct mlx5_indexed_pool_config l3t_ip_cfg = {
758 		.trunk_size = 16,
759 		.grow_trunk = 6,
760 		.grow_shift = 1,
761 		.need_lock = 0,
762 		.release_mem_en = 1,
763 		.malloc = mlx5_malloc,
764 		.free = mlx5_free,
765 	};
766 
767 	if (type >= MLX5_L3T_TYPE_MAX) {
768 		rte_errno = EINVAL;
769 		return NULL;
770 	}
771 	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
772 			  SOCKET_ID_ANY);
773 	if (!tbl) {
774 		rte_errno = ENOMEM;
775 		return NULL;
776 	}
777 	tbl->type = type;
778 	switch (type) {
779 	case MLX5_L3T_TYPE_WORD:
780 		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_word);
781 		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_w";
782 		break;
783 	case MLX5_L3T_TYPE_DWORD:
784 		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword);
785 		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw";
786 		break;
787 	case MLX5_L3T_TYPE_QWORD:
788 		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword);
789 		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw";
790 		break;
791 	default:
792 		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr);
793 		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr";
794 		break;
795 	}
796 	rte_spinlock_init(&tbl->sl);
797 	tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
798 	if (!tbl->eip) {
799 		rte_errno = ENOMEM;
800 		mlx5_free(tbl);
801 		tbl = NULL;
802 	}
803 	return tbl;
804 }
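
/*
 * Illustrative sketch (not part of the original file): storing and
 * retrieving a 64-bit value in a three-level table. The index and value are
 * arbitrary examples. Entries are reference counted: a successful set and
 * every get that returns non-zero data take a reference, which must be
 * dropped with mlx5_l3t_clear_entry().
 */
static void
example_l3t_usage(void)
{
	struct mlx5_l3t_tbl *tbl;
	union mlx5_l3t_data data;
	uint32_t idx = 42;

	tbl = mlx5_l3t_create(MLX5_L3T_TYPE_QWORD);
	if (!tbl)
		return;
	data.qword = 0x1234;
	if (!mlx5_l3t_set_entry(tbl, idx, &data)) {
		data.qword = 0;
		if (!mlx5_l3t_get_entry(tbl, idx, &data))
			MLX5_ASSERT(data.qword == 0x1234);
		/* Drop the references taken by the set and the get. */
		mlx5_l3t_clear_entry(tbl, idx);
		mlx5_l3t_clear_entry(tbl, idx);
	}
	mlx5_l3t_destroy(tbl);
}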
805 
806 void
807 mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
808 {
809 	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
810 	uint32_t i, j;
811 
812 	if (!tbl)
813 		return;
814 	g_tbl = tbl->tbl;
815 	if (g_tbl) {
816 		for (i = 0; i < MLX5_L3T_GT_SIZE; i++) {
817 			m_tbl = g_tbl->tbl[i];
818 			if (!m_tbl)
819 				continue;
820 			for (j = 0; j < MLX5_L3T_MT_SIZE; j++) {
821 				if (!m_tbl->tbl[j])
822 					continue;
823 				MLX5_ASSERT(!((struct mlx5_l3t_entry_word *)
824 					    m_tbl->tbl[j])->ref_cnt);
825 				mlx5_ipool_free(tbl->eip,
826 						((struct mlx5_l3t_entry_word *)
827 						m_tbl->tbl[j])->idx);
828 				m_tbl->tbl[j] = 0;
829 				if (!(--m_tbl->ref_cnt))
830 					break;
831 			}
832 			MLX5_ASSERT(!m_tbl->ref_cnt);
833 			mlx5_free(g_tbl->tbl[i]);
834 			g_tbl->tbl[i] = 0;
835 			if (!(--g_tbl->ref_cnt))
836 				break;
837 		}
838 		MLX5_ASSERT(!g_tbl->ref_cnt);
839 		mlx5_free(tbl->tbl);
840 		tbl->tbl = 0;
841 	}
842 	mlx5_ipool_destroy(tbl->eip);
843 	mlx5_free(tbl);
844 }
845 
846 static int32_t
847 __l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
848 		union mlx5_l3t_data *data)
849 {
850 	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
851 	struct mlx5_l3t_entry_word *w_e_tbl;
852 	struct mlx5_l3t_entry_dword *dw_e_tbl;
853 	struct mlx5_l3t_entry_qword *qw_e_tbl;
854 	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
855 	void *e_tbl;
856 	uint32_t entry_idx;
857 
858 	g_tbl = tbl->tbl;
859 	if (!g_tbl)
860 		return -1;
861 	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
862 	if (!m_tbl)
863 		return -1;
864 	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
865 	if (!e_tbl)
866 		return -1;
867 	entry_idx = idx & MLX5_L3T_ET_MASK;
868 	switch (tbl->type) {
869 	case MLX5_L3T_TYPE_WORD:
870 		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
871 		data->word = w_e_tbl->entry[entry_idx].data;
872 		if (w_e_tbl->entry[entry_idx].data)
873 			w_e_tbl->entry[entry_idx].ref_cnt++;
874 		break;
875 	case MLX5_L3T_TYPE_DWORD:
876 		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
877 		data->dword = dw_e_tbl->entry[entry_idx].data;
878 		if (dw_e_tbl->entry[entry_idx].data)
879 			dw_e_tbl->entry[entry_idx].ref_cnt++;
880 		break;
881 	case MLX5_L3T_TYPE_QWORD:
882 		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
883 		data->qword = qw_e_tbl->entry[entry_idx].data;
884 		if (qw_e_tbl->entry[entry_idx].data)
885 			qw_e_tbl->entry[entry_idx].ref_cnt++;
886 		break;
887 	default:
888 		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
889 		data->ptr = ptr_e_tbl->entry[entry_idx].data;
890 		if (ptr_e_tbl->entry[entry_idx].data)
891 			ptr_e_tbl->entry[entry_idx].ref_cnt++;
892 		break;
893 	}
894 	return 0;
895 }
896 
897 int32_t
898 mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
899 		   union mlx5_l3t_data *data)
900 {
901 	int ret;
902 
903 	rte_spinlock_lock(&tbl->sl);
904 	ret = __l3t_get_entry(tbl, idx, data);
905 	rte_spinlock_unlock(&tbl->sl);
906 	return ret;
907 }
908 
909 int32_t
910 mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
911 {
912 	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
913 	struct mlx5_l3t_entry_word *w_e_tbl;
914 	struct mlx5_l3t_entry_dword *dw_e_tbl;
915 	struct mlx5_l3t_entry_qword *qw_e_tbl;
916 	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
917 	void *e_tbl;
918 	uint32_t entry_idx;
919 	uint64_t ref_cnt;
920 	int32_t ret = -1;
921 
922 	rte_spinlock_lock(&tbl->sl);
923 	g_tbl = tbl->tbl;
924 	if (!g_tbl)
925 		goto out;
926 	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
927 	if (!m_tbl)
928 		goto out;
929 	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
930 	if (!e_tbl)
931 		goto out;
932 	entry_idx = idx & MLX5_L3T_ET_MASK;
933 	switch (tbl->type) {
934 	case MLX5_L3T_TYPE_WORD:
935 		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
936 		MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
937 		ret = --w_e_tbl->entry[entry_idx].ref_cnt;
938 		if (ret)
939 			goto out;
940 		w_e_tbl->entry[entry_idx].data = 0;
941 		ref_cnt = --w_e_tbl->ref_cnt;
942 		break;
943 	case MLX5_L3T_TYPE_DWORD:
944 		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
945 		MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
946 		ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
947 		if (ret)
948 			goto out;
949 		dw_e_tbl->entry[entry_idx].data = 0;
950 		ref_cnt = --dw_e_tbl->ref_cnt;
951 		break;
952 	case MLX5_L3T_TYPE_QWORD:
953 		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
954 		MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
955 		ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
956 		if (ret)
957 			goto out;
958 		qw_e_tbl->entry[entry_idx].data = 0;
959 		ref_cnt = --qw_e_tbl->ref_cnt;
960 		break;
961 	default:
962 		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
963 		MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
964 		ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
965 		if (ret)
966 			goto out;
967 		ptr_e_tbl->entry[entry_idx].data = NULL;
968 		ref_cnt = --ptr_e_tbl->ref_cnt;
969 		break;
970 	}
971 	if (!ref_cnt) {
972 		mlx5_ipool_free(tbl->eip,
973 				((struct mlx5_l3t_entry_word *)e_tbl)->idx);
974 		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
975 									NULL;
976 		if (!(--m_tbl->ref_cnt)) {
977 			mlx5_free(m_tbl);
978 			g_tbl->tbl
979 			[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] = NULL;
980 			if (!(--g_tbl->ref_cnt)) {
981 				mlx5_free(g_tbl);
982 				tbl->tbl = 0;
983 			}
984 		}
985 	}
986 out:
987 	rte_spinlock_unlock(&tbl->sl);
988 	return ret;
989 }
990 
991 static int32_t
992 __l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
993 		union mlx5_l3t_data *data)
994 {
995 	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
996 	struct mlx5_l3t_entry_word *w_e_tbl;
997 	struct mlx5_l3t_entry_dword *dw_e_tbl;
998 	struct mlx5_l3t_entry_qword *qw_e_tbl;
999 	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
1000 	void *e_tbl;
1001 	uint32_t entry_idx, tbl_idx = 0;
1002 
1003 	/* Check the global table, create it if empty. */
1004 	g_tbl = tbl->tbl;
1005 	if (!g_tbl) {
1006 		g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
1007 				    sizeof(struct mlx5_l3t_level_tbl) +
1008 				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
1009 				    SOCKET_ID_ANY);
1010 		if (!g_tbl) {
1011 			rte_errno = ENOMEM;
1012 			return -1;
1013 		}
1014 		tbl->tbl = g_tbl;
1015 	}
1016 	/*
1017 	 * Check the middle table, create it if empty. The parent (global)
1018 	 * table ref_cnt is increased if a new sub-table is created.
1019 	 */
1020 	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
1021 	if (!m_tbl) {
1022 		m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
1023 				    sizeof(struct mlx5_l3t_level_tbl) +
1024 				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
1025 				    SOCKET_ID_ANY);
1026 		if (!m_tbl) {
1027 			rte_errno = ENOMEM;
1028 			return -1;
1029 		}
1030 		g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
1031 									m_tbl;
1032 		g_tbl->ref_cnt++;
1033 	}
1034 	/*
1035 	 * Check the entry table, create it if empty. The middle table
1036 	 * ref_cnt is increased if a new entry table is created.
1037 	 */
1038 	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
1039 	if (!e_tbl) {
1040 		e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
1041 		if (!e_tbl) {
1042 			rte_errno = ENOMEM;
1043 			return -1;
1044 		}
1045 		((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
1046 		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
1047 									e_tbl;
1048 		m_tbl->ref_cnt++;
1049 	}
1050 	entry_idx = idx & MLX5_L3T_ET_MASK;
1051 	switch (tbl->type) {
1052 	case MLX5_L3T_TYPE_WORD:
1053 		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
1054 		if (w_e_tbl->entry[entry_idx].data) {
1055 			data->word = w_e_tbl->entry[entry_idx].data;
1056 			w_e_tbl->entry[entry_idx].ref_cnt++;
1057 			rte_errno = EEXIST;
1058 			return -1;
1059 		}
1060 		w_e_tbl->entry[entry_idx].data = data->word;
1061 		w_e_tbl->entry[entry_idx].ref_cnt = 1;
1062 		w_e_tbl->ref_cnt++;
1063 		break;
1064 	case MLX5_L3T_TYPE_DWORD:
1065 		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
1066 		if (dw_e_tbl->entry[entry_idx].data) {
1067 			data->dword = dw_e_tbl->entry[entry_idx].data;
1068 			dw_e_tbl->entry[entry_idx].ref_cnt++;
1069 			rte_errno = EEXIST;
1070 			return -1;
1071 		}
1072 		dw_e_tbl->entry[entry_idx].data = data->dword;
1073 		dw_e_tbl->entry[entry_idx].ref_cnt = 1;
1074 		dw_e_tbl->ref_cnt++;
1075 		break;
1076 	case MLX5_L3T_TYPE_QWORD:
1077 		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
1078 		if (qw_e_tbl->entry[entry_idx].data) {
1079 			data->qword = qw_e_tbl->entry[entry_idx].data;
1080 			qw_e_tbl->entry[entry_idx].ref_cnt++;
1081 			rte_errno = EEXIST;
1082 			return -1;
1083 		}
1084 		qw_e_tbl->entry[entry_idx].data = data->qword;
1085 		qw_e_tbl->entry[entry_idx].ref_cnt = 1;
1086 		qw_e_tbl->ref_cnt++;
1087 		break;
1088 	default:
1089 		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
1090 		if (ptr_e_tbl->entry[entry_idx].data) {
1091 			data->ptr = ptr_e_tbl->entry[entry_idx].data;
1092 			ptr_e_tbl->entry[entry_idx].ref_cnt++;
1093 			rte_errno = EEXIST;
1094 			return -1;
1095 		}
1096 		ptr_e_tbl->entry[entry_idx].data = data->ptr;
1097 		ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
1098 		ptr_e_tbl->ref_cnt++;
1099 		break;
1100 	}
1101 	return 0;
1102 }
1103 
1104 int32_t
1105 mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
1106 		   union mlx5_l3t_data *data)
1107 {
1108 	int ret;
1109 
1110 	rte_spinlock_lock(&tbl->sl);
1111 	ret = __l3t_set_entry(tbl, idx, data);
1112 	rte_spinlock_unlock(&tbl->sl);
1113 	return ret;
1114 }
1115 
1116 int32_t
1117 mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
1118 		       union mlx5_l3t_data *data,
1119 		       mlx5_l3t_alloc_callback_fn cb, void *ctx)
1120 {
1121 	int32_t ret;
1122 
1123 	rte_spinlock_lock(&tbl->sl);
1124 	/* Check if entry data is ready. */
1125 	ret = __l3t_get_entry(tbl, idx, data);
1126 	if (!ret) {
1127 		switch (tbl->type) {
1128 		case MLX5_L3T_TYPE_WORD:
1129 			if (data->word)
1130 				goto out;
1131 			break;
1132 		case MLX5_L3T_TYPE_DWORD:
1133 			if (data->dword)
1134 				goto out;
1135 			break;
1136 		case MLX5_L3T_TYPE_QWORD:
1137 			if (data->qword)
1138 				goto out;
1139 			break;
1140 		default:
1141 			if (data->ptr)
1142 				goto out;
1143 			break;
1144 		}
1145 	}
1146 	/* Entry data is not ready, use the user callback to create it. */
1147 	ret = cb(ctx, data);
1148 	if (ret)
1149 		goto out;
1150 	/* Save the newly allocated data to the entry. */
1151 	ret = __l3t_set_entry(tbl, idx, data);
1152 out:
1153 	rte_spinlock_unlock(&tbl->sl);
1154 	return ret;
1155 }
1156
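/*
 * Illustrative sketch (not part of the original file): using
 * mlx5_l3t_prepare_entry() as a "get or create" helper on a table created
 * with MLX5_L3T_TYPE_DWORD. The callback and the stored value are
 * hypothetical; the callback signature follows the way cb is invoked above,
 * and it is called under the table spinlock only when no valid data exists
 * for the index.
 */
static int32_t
example_l3t_alloc_cb(void *ctx __rte_unused, union mlx5_l3t_data *data)
{
	/* Allocate or look up the real resource here; return 0 on success. */
	data->dword = 0xdeadbeef;
	return 0;
}

static int32_t
example_l3t_prepare(struct mlx5_l3t_tbl *tbl, uint32_t idx)
{
	union mlx5_l3t_data data;

	/* On success, data holds either the existing or the new value. */
	return mlx5_l3t_prepare_entry(tbl, idx, &data, example_l3t_alloc_cb,
				      NULL);
}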