xref: /dpdk/drivers/common/mlx5/mlx5_common_utils.c (revision 6507c9f51d9dc3e1ac074ce85bcadaf69afa9dee)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_hash_crc.h>
#include <rte_errno.h>

#include <mlx5_malloc.h>

#include "mlx5_common_utils.h"
#include "mlx5_common_log.h"

/********************* mlx5 list ************************/

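/*
 * The mlx5 list is a reference-counted registry with one cache per lcore
 * plus one extra slot (index RTE_MAX_LCORE) holding the global entries that
 * are shared between lcores when lcores_share is enabled. Lookups and
 * registrations on the calling lcore's cache take no lock; the global cache
 * is protected by the list rwlock. When lcores_share is disabled, every
 * entry lives only in the cache of the lcore that created it. Entries
 * released from a foreign lcore are only marked invalid (inv_cnt) and are
 * freed later by the owning lcore in __list_cache_clean().
 */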
struct mlx5_list *
mlx5_list_create(const char *name, void *ctx, bool lcores_share,
		 mlx5_list_create_cb cb_create,
		 mlx5_list_match_cb cb_match,
		 mlx5_list_remove_cb cb_remove,
		 mlx5_list_clone_cb cb_clone,
		 mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_list *list;
	int i;

	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
	    !cb_clone_free) {
		rte_errno = EINVAL;
		return NULL;
	}
	list = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*list), 0, SOCKET_ID_ANY);
	if (!list)
		return NULL;
	if (name)
		snprintf(list->name, sizeof(list->name), "%s", name);
	list->ctx = ctx;
	list->lcores_share = lcores_share;
	list->cb_create = cb_create;
	list->cb_match = cb_match;
	list->cb_remove = cb_remove;
	list->cb_clone = cb_clone;
	list->cb_clone_free = cb_clone_free;
	rte_rwlock_init(&list->lock);
	DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name);
	for (i = 0; i <= RTE_MAX_LCORE; i++)
		LIST_INIT(&list->cache[i].h);
	return list;
}

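/*
 * A minimal usage sketch of the list API. The names below (sh, key, tag_*)
 * are hypothetical; only the mlx5_list_* calls come from this file and the
 * callbacks are assumed to match the typedefs in mlx5_common_utils.h:
 *
 *	struct mlx5_list *tags = mlx5_list_create("tags", sh, true,
 *						  tag_create, tag_match,
 *						  tag_remove, tag_clone,
 *						  tag_clone_free);
 *	struct mlx5_list_entry *e = mlx5_list_register(tags, &key);
 *	if (e != NULL) {
 *		... use the entry ...
 *		mlx5_list_unregister(tags, e);
 *	}
 *	mlx5_list_destroy(tags);
 *
 * The context given to mlx5_list_create() ("sh" above) is passed back as
 * the first argument of every callback; the per-call context (&key) is what
 * cb_match() compares against.
 */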
static struct mlx5_list_entry *
__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)
{
	struct mlx5_list_entry *entry = LIST_FIRST(&list->cache[lcore_index].h);
	uint32_t ret;

	while (entry != NULL) {
		if (list->cb_match(list->ctx, entry, ctx) == 0) {
			if (reuse) {
				ret = __atomic_add_fetch(&entry->ref_cnt, 1,
							 __ATOMIC_RELAXED) - 1;
				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
					list->name, (void *)entry,
					entry->ref_cnt);
			} else if (lcore_index < RTE_MAX_LCORE) {
				ret = __atomic_load_n(&entry->ref_cnt,
						      __ATOMIC_RELAXED);
			}
			if (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))
				return entry;
			if (reuse && ret == 0)
				entry->ref_cnt--; /* Invalid entry. */
		}
		entry = LIST_NEXT(entry, next);
	}
	return NULL;
}

struct mlx5_list_entry *
mlx5_list_lookup(struct mlx5_list *list, void *ctx)
{
	struct mlx5_list_entry *entry = NULL;
	int i;

	rte_rwlock_read_lock(&list->lock);
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		entry = __list_lookup(list, i, ctx, false);
		if (entry)
			break;
	}
	rte_rwlock_read_unlock(&list->lock);
	return entry;
}

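/*
 * Clone the global entry @gentry into the calling lcore's cache so that
 * later lookups on this lcore hit locally. The clone holds one local
 * reference and points back to the global entry.
 */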
static struct mlx5_list_entry *
mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,
		       struct mlx5_list_entry *gentry, void *ctx)
{
	struct mlx5_list_entry *lentry = list->cb_clone(list->ctx, gentry, ctx);

	if (unlikely(!lentry))
		return NULL;
	lentry->ref_cnt = 1u;
	lentry->gentry = gentry;
	lentry->lcore_idx = (uint32_t)lcore_index;
	LIST_INSERT_HEAD(&list->cache[lcore_index].h, lentry, next);
	return lentry;
}

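/*
 * Drop the entries that other lcores marked as invalid (inv_cnt) in the
 * given lcore cache. Only entries whose reference count reached zero are
 * freed, and the scan stops once all pending invalidations are handled.
 */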
static void
__list_cache_clean(struct mlx5_list *list, int lcore_index)
{
	struct mlx5_list_cache *c = &list->cache[lcore_index];
	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
					       __ATOMIC_RELAXED);

	while (inv_cnt != 0 && entry != NULL) {
		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);

		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
			LIST_REMOVE(entry, next);
			if (list->lcores_share)
				list->cb_clone_free(list->ctx, entry);
			else
				list->cb_remove(list->ctx, entry);
			inv_cnt--;
		}
		entry = nentry;
	}
}

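/*
 * Register (or reuse) an entry matching @ctx:
 * 0. free entries previously invalidated by other lcores,
 * 1. look up in the local lcore cache,
 * 2. with lcores_share, look up the global cache under the read lock,
 * 3. otherwise create a new entry,
 * 4. re-check under the write lock that no other lcore created the same
 *    entry meanwhile (gen_cnt) and reuse it if so,
 * 5. publish the new entry globally and clone it into the local cache.
 */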
struct mlx5_list_entry *
mlx5_list_register(struct mlx5_list *list, void *ctx)
{
	struct mlx5_list_entry *entry = NULL, *local_entry;
	volatile uint32_t prev_gen_cnt = 0;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	MLX5_ASSERT(list);
	MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
	if (unlikely(lcore_index == -1)) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	/* 0. Free entries that were invalidated by other lcores. */
	__list_cache_clean(list, lcore_index);
	/* 1. Lookup in local cache. */
	local_entry = __list_lookup(list, lcore_index, ctx, true);
	if (local_entry)
		return local_entry;
	if (list->lcores_share) {
		/* 2. Lookup with read lock on global list, reuse if found. */
		rte_rwlock_read_lock(&list->lock);
		entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);
		if (likely(entry)) {
			rte_rwlock_read_unlock(&list->lock);
			return mlx5_list_cache_insert(list, lcore_index, entry,
						      ctx);
		}
		prev_gen_cnt = list->gen_cnt;
		rte_rwlock_read_unlock(&list->lock);
	}
	/* 3. Prepare new entry for global list and for cache. */
	entry = list->cb_create(list->ctx, ctx);
	if (unlikely(!entry))
		return NULL;
	entry->ref_cnt = 1u;
	if (!list->lcores_share) {
		entry->lcore_idx = (uint32_t)lcore_index;
		LIST_INSERT_HEAD(&list->cache[lcore_index].h, entry, next);
		__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
			list->name, lcore_index, (void *)entry, entry->ref_cnt);
		return entry;
	}
	local_entry = list->cb_clone(list->ctx, entry, ctx);
	if (unlikely(!local_entry)) {
		list->cb_remove(list->ctx, entry);
		return NULL;
	}
	local_entry->ref_cnt = 1u;
	local_entry->gentry = entry;
	local_entry->lcore_idx = (uint32_t)lcore_index;
	rte_rwlock_write_lock(&list->lock);
	/* 4. Make sure the same entry was not created before the write lock was taken. */
	if (unlikely(prev_gen_cnt != list->gen_cnt)) {
		struct mlx5_list_entry *oentry = __list_lookup(list,
							       RTE_MAX_LCORE,
							       ctx, true);

		if (unlikely(oentry)) {
			/* 4.5. Found a real race, reuse the old entry. */
			rte_rwlock_write_unlock(&list->lock);
			list->cb_remove(list->ctx, entry);
			list->cb_clone_free(list->ctx, local_entry);
			return mlx5_list_cache_insert(list, lcore_index, oentry,
						      ctx);
		}
	}
	/* 5. Update lists. */
	LIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry, next);
	list->gen_cnt++;
	rte_rwlock_write_unlock(&list->lock);
	LIST_INSERT_HEAD(&list->cache[lcore_index].h, local_entry, next);
	__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", list->name,
		(void *)entry, entry->ref_cnt);
	return local_entry;
}

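/*
 * Release one reference on @entry. The local copy is freed immediately when
 * called from the owning lcore, otherwise it is only marked invalid for the
 * owner to clean up later. With lcores_share, the global entry is removed
 * under the write lock once its own reference count reaches zero.
 * Returns 1 while references remain on the entry, 0 otherwise.
 */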
int
mlx5_list_unregister(struct mlx5_list *list,
		      struct mlx5_list_entry *entry)
{
	struct mlx5_list_entry *gentry = entry->gentry;
	int lcore_idx;

	if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	lcore_idx = rte_lcore_index(rte_lcore_id());
	MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
	if (entry->lcore_idx == (uint32_t)lcore_idx) {
		LIST_REMOVE(entry, next);
		if (list->lcores_share)
			list->cb_clone_free(list->ctx, entry);
		else
			list->cb_remove(list->ctx, entry);
	} else if (likely(lcore_idx != -1)) {
		__atomic_add_fetch(&list->cache[entry->lcore_idx].inv_cnt, 1,
				   __ATOMIC_RELAXED);
	} else {
		return 0;
	}
	if (!list->lcores_share) {
		__atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			list->name, (void *)entry);
		return 0;
	}
	if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	rte_rwlock_write_lock(&list->lock);
	if (likely(gentry->ref_cnt == 0)) {
		LIST_REMOVE(gentry, next);
		rte_rwlock_write_unlock(&list->lock);
		list->cb_remove(list->ctx, gentry);
		__atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			list->name, (void *)gentry);
		return 0;
	}
	rte_rwlock_write_unlock(&list->lock);
	return 1;
}

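/*
 * Free all entries left in every per-lcore cache and in the global cache
 * (index RTE_MAX_LCORE), then free the list itself. No locking is done
 * here, so the list is expected to have no other users at this point.
 */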
void
mlx5_list_destroy(struct mlx5_list *list)
{
	struct mlx5_list_entry *entry;
	int i;

	MLX5_ASSERT(list);
	for (i = 0; i <= RTE_MAX_LCORE; i++) {
		while (!LIST_EMPTY(&list->cache[i].h)) {
			entry = LIST_FIRST(&list->cache[i].h);
			LIST_REMOVE(entry, next);
			if (i == RTE_MAX_LCORE) {
				list->cb_remove(list->ctx, entry);
				DRV_LOG(DEBUG, "mlx5 list %s entry %p "
					"destroyed.", list->name,
					(void *)entry);
			} else {
				list->cb_clone_free(list->ctx, entry);
			}
		}
	}
	mlx5_free(list);
}

uint32_t
mlx5_list_get_entry_num(struct mlx5_list *list)
{
	MLX5_ASSERT(list);
	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
}

/********************* Hash List **********************/

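/*
 * The hash list spreads entries over a power-of-two array of buckets, each
 * with its own head, rwlock and generation counter. The bucket index is
 * either the key itself (MLX5_HLIST_DIRECT_KEY) or a CRC of the key, masked
 * to the table size. Entries are reference counted; registration repeats
 * the lookup under the bucket write lock when the generation counter shows
 * that the bucket changed since the read-locked lookup.
 */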
static struct mlx5_hlist_entry *
mlx5_hlist_default_create_cb(struct mlx5_hlist *h, uint64_t key __rte_unused,
			     void *ctx __rte_unused)
{
	return mlx5_malloc(MLX5_MEM_ZERO, h->entry_sz, 0, SOCKET_ID_ANY);
}

static void
mlx5_hlist_default_remove_cb(struct mlx5_hlist *h __rte_unused,
			     struct mlx5_hlist_entry *entry)
{
	mlx5_free(entry);
}

struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size,
		  uint32_t flags, mlx5_hlist_create_cb cb_create,
		  mlx5_hlist_match_cb cb_match, mlx5_hlist_remove_cb cb_remove)
{
	struct mlx5_hlist *h;
	uint32_t act_size;
	uint32_t alloc_size;
	uint32_t i;

	if (!size || !cb_match || (!cb_create ^ !cb_remove))
		return NULL;
	/* Align to the next power of 2; a 32-bit integer is enough for now. */
	if (!rte_is_power_of_2(size)) {
		act_size = rte_align32pow2(size);
		DRV_LOG(DEBUG, "Size 0x%" PRIX32 " is not power of 2, "
			"will be aligned to 0x%" PRIX32 ".", size, act_size);
	} else {
		act_size = size;
	}
	alloc_size = sizeof(struct mlx5_hlist) +
		     sizeof(struct mlx5_hlist_bucket) * act_size;
	/* Using zeroed memory, so the heads need no explicit initialization. */
	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
			SOCKET_ID_ANY);
	if (!h) {
		DRV_LOG(ERR, "No memory for hash list %s creation",
			name ? name : "None");
		return NULL;
	}
	if (name)
		snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name);
	h->table_sz = act_size;
	h->mask = act_size - 1;
	h->entry_sz = entry_size;
	h->direct_key = !!(flags & MLX5_HLIST_DIRECT_KEY);
	h->write_most = !!(flags & MLX5_HLIST_WRITE_MOST);
	h->cb_create = cb_create ? cb_create : mlx5_hlist_default_create_cb;
	h->cb_match = cb_match;
	h->cb_remove = cb_remove ? cb_remove : mlx5_hlist_default_remove_cb;
	for (i = 0; i < act_size; i++)
		rte_rwlock_init(&h->buckets[i].lock);
	DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.",
		h->name, act_size);
	return h;
}

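/*
 * A minimal usage sketch of the hash list API. The entry type, the key and
 * the match callback are hypothetical; only the mlx5_hlist_* calls come
 * from this file. It is suggested that the embedded mlx5_hlist_entry be the
 * first field of the user element:
 *
 *	struct my_entry {
 *		struct mlx5_hlist_entry hl;
 *		uint64_t value;
 *	};
 *
 *	struct mlx5_hlist *h = mlx5_hlist_create("vals", 64,
 *						 sizeof(struct my_entry), 0,
 *						 NULL, my_match, NULL);
 *	struct mlx5_hlist_entry *e = mlx5_hlist_register(h, key, NULL);
 *	...
 *	mlx5_hlist_unregister(h, e);
 *	mlx5_hlist_destroy(h);
 *
 * Passing NULL create/remove callbacks selects the default
 * mlx5_malloc()/mlx5_free() based ones defined above.
 */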
static struct mlx5_hlist_entry *
__hlist_lookup(struct mlx5_hlist *h, uint64_t key, uint32_t idx,
	       void *ctx, bool reuse)
{
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h);
	first = &h->buckets[idx].head;
	LIST_FOREACH(node, first, next) {
		if (!h->cb_match(h, node, key, ctx)) {
			if (reuse) {
				__atomic_add_fetch(&node->ref_cnt, 1,
						   __ATOMIC_RELAXED);
				DRV_LOG(DEBUG, "Hash list %s entry %p "
					"reuse: %u.",
					h->name, (void *)node, node->ref_cnt);
			}
			break;
		}
	}
	return node;
}

static struct mlx5_hlist_entry *
hlist_lookup(struct mlx5_hlist *h, uint64_t key, uint32_t idx,
	     void *ctx, bool reuse)
{
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h);
	rte_rwlock_read_lock(&h->buckets[idx].lock);
	node = __hlist_lookup(h, key, idx, ctx, reuse);
	rte_rwlock_read_unlock(&h->buckets[idx].lock);
	return node;
}

struct mlx5_hlist_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;

	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	return hlist_lookup(h, key, idx, ctx, false);
}

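/*
 * Register (or reuse) an entry for @key. For lists that are not write-most,
 * a first lookup runs under the bucket read lock and may reuse an existing
 * entry without the write lock. Write-most lists skip that pass and take
 * the write lock directly. Before creating a new entry, the lookup is
 * repeated under the write lock whenever the bucket generation counter
 * shows that another thread changed the bucket in between.
 */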
struct mlx5_hlist_entry *
mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_bucket *b;
	struct mlx5_hlist_entry *entry;
	uint32_t prev_gen_cnt = 0;

	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	MLX5_ASSERT(h);
	b = &h->buckets[idx];
	/* Use the write lock directly for a write-most list. */
	if (!h->write_most) {
		prev_gen_cnt = __atomic_load_n(&b->gen_cnt, __ATOMIC_ACQUIRE);
		entry = hlist_lookup(h, key, idx, ctx, true);
		if (entry)
			return entry;
	}
	rte_rwlock_write_lock(&b->lock);
	/* Check if the list was changed by other threads. */
	if (h->write_most ||
	    prev_gen_cnt != __atomic_load_n(&b->gen_cnt, __ATOMIC_ACQUIRE)) {
		entry = __hlist_lookup(h, key, idx, ctx, true);
		if (entry)
			goto done;
	}
	first = &b->head;
	entry = h->cb_create(h, key, ctx);
	if (!entry) {
		rte_errno = ENOMEM;
		DRV_LOG(DEBUG, "Can't allocate hash list %s entry.", h->name);
		goto done;
	}
	entry->idx = idx;
	entry->ref_cnt = 1;
	LIST_INSERT_HEAD(first, entry, next);
	__atomic_add_fetch(&b->gen_cnt, 1, __ATOMIC_ACQ_REL);
	DRV_LOG(DEBUG, "Hash list %s entry %p new: %u.",
		h->name, (void *)entry, entry->ref_cnt);
done:
	rte_rwlock_write_unlock(&b->lock);
	return entry;
}

int
mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
{
	uint32_t idx = entry->idx;

	rte_rwlock_write_lock(&h->buckets[idx].lock);
	MLX5_ASSERT(entry && entry->ref_cnt && entry->next.le_prev);
	DRV_LOG(DEBUG, "Hash list %s entry %p deref: %u.",
		h->name, (void *)entry, entry->ref_cnt);
	if (--entry->ref_cnt) {
		rte_rwlock_write_unlock(&h->buckets[idx].lock);
		return 1;
	}
	LIST_REMOVE(entry, next);
	/* Set to NULL so that the entry cannot be removed more than once. */
	entry->next.le_prev = NULL;
	h->cb_remove(h, entry);
	rte_rwlock_write_unlock(&h->buckets[idx].lock);
	DRV_LOG(DEBUG, "Hash list %s entry %p removed.",
		h->name, (void *)entry);
	return 0;
}

void
mlx5_hlist_destroy(struct mlx5_hlist *h)
{
	uint32_t idx;
	struct mlx5_hlist_entry *entry;

	MLX5_ASSERT(h);
	for (idx = 0; idx < h->table_sz; ++idx) {
		/* No LIST_FOREACH_SAFE, using while instead. */
		while (!LIST_EMPTY(&h->buckets[idx].head)) {
			entry = LIST_FIRST(&h->buckets[idx].head);
			LIST_REMOVE(entry, next);
			/*
			 * The user owns the whole element that contains the
			 * data entry and is responsible for its cleanup and
			 * freeing, since the hlist entry may not be placed
			 * at the beginning of the element (locating it at
			 * the beginning is only a suggestion). If no remove
			 * callback was provided, the default free function
			 * is used.
			 */
			h->cb_remove(h, entry);
		}
	}
	mlx5_free(h);
}