xref: /dpdk/drivers/common/mlx5/mlx5_common_utils.c (revision 9c373c524bae775e39b1258a9e037f717202a840)
125245d5dSShiri Kuzin /* SPDX-License-Identifier: BSD-3-Clause
225245d5dSShiri Kuzin  * Copyright 2019 Mellanox Technologies, Ltd
325245d5dSShiri Kuzin  */
425245d5dSShiri Kuzin 
525245d5dSShiri Kuzin #include <rte_malloc.h>
625245d5dSShiri Kuzin #include <rte_hash_crc.h>
725245d5dSShiri Kuzin #include <rte_errno.h>
825245d5dSShiri Kuzin 
925245d5dSShiri Kuzin #include <mlx5_malloc.h>
1025245d5dSShiri Kuzin 
1125245d5dSShiri Kuzin #include "mlx5_common_utils.h"
1225245d5dSShiri Kuzin #include "mlx5_common_log.h"
1325245d5dSShiri Kuzin 
14*9c373c52SSuanming Mou /********************* mlx5 list ************************/
15*9c373c52SSuanming Mou 
16*9c373c52SSuanming Mou struct mlx5_list *
17*9c373c52SSuanming Mou mlx5_list_create(const char *name, void *ctx,
18*9c373c52SSuanming Mou 		 mlx5_list_create_cb cb_create,
19*9c373c52SSuanming Mou 		 mlx5_list_match_cb cb_match,
20*9c373c52SSuanming Mou 		 mlx5_list_remove_cb cb_remove,
21*9c373c52SSuanming Mou 		 mlx5_list_clone_cb cb_clone,
22*9c373c52SSuanming Mou 		 mlx5_list_clone_free_cb cb_clone_free)
23*9c373c52SSuanming Mou {
24*9c373c52SSuanming Mou 	struct mlx5_list *list;
25*9c373c52SSuanming Mou 	int i;
26*9c373c52SSuanming Mou 
27*9c373c52SSuanming Mou 	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
28*9c373c52SSuanming Mou 	    !cb_clone_free) {
29*9c373c52SSuanming Mou 		rte_errno = EINVAL;
30*9c373c52SSuanming Mou 		return NULL;
31*9c373c52SSuanming Mou 	}
32*9c373c52SSuanming Mou 	list = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*list), 0, SOCKET_ID_ANY);
33*9c373c52SSuanming Mou 	if (!list)
34*9c373c52SSuanming Mou 		return NULL;
35*9c373c52SSuanming Mou 	if (name)
36*9c373c52SSuanming Mou 		snprintf(list->name, sizeof(list->name), "%s", name);
37*9c373c52SSuanming Mou 	list->ctx = ctx;
38*9c373c52SSuanming Mou 	list->cb_create = cb_create;
39*9c373c52SSuanming Mou 	list->cb_match = cb_match;
40*9c373c52SSuanming Mou 	list->cb_remove = cb_remove;
41*9c373c52SSuanming Mou 	list->cb_clone = cb_clone;
42*9c373c52SSuanming Mou 	list->cb_clone_free = cb_clone_free;
43*9c373c52SSuanming Mou 	rte_rwlock_init(&list->lock);
44*9c373c52SSuanming Mou 	DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name);
45*9c373c52SSuanming Mou 	for (i = 0; i <= RTE_MAX_LCORE; i++)
46*9c373c52SSuanming Mou 		LIST_INIT(&list->cache[i].h);
47*9c373c52SSuanming Mou 	return list;
48*9c373c52SSuanming Mou }
49*9c373c52SSuanming Mou 
50*9c373c52SSuanming Mou static struct mlx5_list_entry *
51*9c373c52SSuanming Mou __list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)
52*9c373c52SSuanming Mou {
53*9c373c52SSuanming Mou 	struct mlx5_list_entry *entry = LIST_FIRST(&list->cache[lcore_index].h);
54*9c373c52SSuanming Mou 	uint32_t ret;
55*9c373c52SSuanming Mou 
56*9c373c52SSuanming Mou 	while (entry != NULL) {
57*9c373c52SSuanming Mou 		if (list->cb_match(list, entry, ctx) == 0) {
58*9c373c52SSuanming Mou 			if (reuse) {
59*9c373c52SSuanming Mou 				ret = __atomic_add_fetch(&entry->ref_cnt, 1,
60*9c373c52SSuanming Mou 							 __ATOMIC_RELAXED) - 1;
61*9c373c52SSuanming Mou 				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
62*9c373c52SSuanming Mou 					list->name, (void *)entry,
63*9c373c52SSuanming Mou 					entry->ref_cnt);
64*9c373c52SSuanming Mou 			} else if (lcore_index < RTE_MAX_LCORE) {
65*9c373c52SSuanming Mou 				ret = __atomic_load_n(&entry->ref_cnt,
66*9c373c52SSuanming Mou 						      __ATOMIC_RELAXED);
67*9c373c52SSuanming Mou 			}
68*9c373c52SSuanming Mou 			if (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))
69*9c373c52SSuanming Mou 				return entry;
70*9c373c52SSuanming Mou 			if (reuse && ret == 0)
71*9c373c52SSuanming Mou 				entry->ref_cnt--; /* Invalid entry. */
72*9c373c52SSuanming Mou 		}
73*9c373c52SSuanming Mou 		entry = LIST_NEXT(entry, next);
74*9c373c52SSuanming Mou 	}
75*9c373c52SSuanming Mou 	return NULL;
76*9c373c52SSuanming Mou }
77*9c373c52SSuanming Mou 
78*9c373c52SSuanming Mou struct mlx5_list_entry *
79*9c373c52SSuanming Mou mlx5_list_lookup(struct mlx5_list *list, void *ctx)
80*9c373c52SSuanming Mou {
81*9c373c52SSuanming Mou 	struct mlx5_list_entry *entry = NULL;
82*9c373c52SSuanming Mou 	int i;
83*9c373c52SSuanming Mou 
84*9c373c52SSuanming Mou 	rte_rwlock_read_lock(&list->lock);
85*9c373c52SSuanming Mou 	for (i = 0; i < RTE_MAX_LCORE; i++) {
86*9c373c52SSuanming Mou 		entry = __list_lookup(list, i, ctx, false);
87*9c373c52SSuanming Mou 		if (entry)
88*9c373c52SSuanming Mou 			break;
89*9c373c52SSuanming Mou 	}
90*9c373c52SSuanming Mou 	rte_rwlock_read_unlock(&list->lock);
91*9c373c52SSuanming Mou 	return entry;
92*9c373c52SSuanming Mou }
93*9c373c52SSuanming Mou 
94*9c373c52SSuanming Mou static struct mlx5_list_entry *
95*9c373c52SSuanming Mou mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,
96*9c373c52SSuanming Mou 		       struct mlx5_list_entry *gentry, void *ctx)
97*9c373c52SSuanming Mou {
98*9c373c52SSuanming Mou 	struct mlx5_list_entry *lentry = list->cb_clone(list, gentry, ctx);
99*9c373c52SSuanming Mou 
100*9c373c52SSuanming Mou 	if (unlikely(!lentry))
101*9c373c52SSuanming Mou 		return NULL;
102*9c373c52SSuanming Mou 	lentry->ref_cnt = 1u;
103*9c373c52SSuanming Mou 	lentry->gentry = gentry;
104*9c373c52SSuanming Mou 	lentry->lcore_idx = (uint32_t)lcore_index;
105*9c373c52SSuanming Mou 	LIST_INSERT_HEAD(&list->cache[lcore_index].h, lentry, next);
106*9c373c52SSuanming Mou 	return lentry;
107*9c373c52SSuanming Mou }
108*9c373c52SSuanming Mou 
109*9c373c52SSuanming Mou static void
110*9c373c52SSuanming Mou __list_cache_clean(struct mlx5_list *list, int lcore_index)
111*9c373c52SSuanming Mou {
112*9c373c52SSuanming Mou 	struct mlx5_list_cache *c = &list->cache[lcore_index];
113*9c373c52SSuanming Mou 	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
114*9c373c52SSuanming Mou 	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
115*9c373c52SSuanming Mou 					       __ATOMIC_RELAXED);
116*9c373c52SSuanming Mou 
117*9c373c52SSuanming Mou 	while (inv_cnt != 0 && entry != NULL) {
118*9c373c52SSuanming Mou 		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
119*9c373c52SSuanming Mou 
120*9c373c52SSuanming Mou 		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
121*9c373c52SSuanming Mou 			LIST_REMOVE(entry, next);
122*9c373c52SSuanming Mou 			list->cb_clone_free(list, entry);
123*9c373c52SSuanming Mou 			inv_cnt--;
124*9c373c52SSuanming Mou 		}
125*9c373c52SSuanming Mou 		entry = nentry;
126*9c373c52SSuanming Mou 	}
127*9c373c52SSuanming Mou }
128*9c373c52SSuanming Mou 
/**
 * Find-or-create an entry matching @p ctx, returning a per-lcore clone.
 *
 * Lookup order: the caller's lcore cache first (lock-free for this lcore),
 * then the global list under the read lock. On miss a new global entry and
 * its local clone are created outside any lock; the generation counter is
 * used to detect a concurrent insertion before the write lock was taken.
 *
 * @param list
 *   Pointer to the mlx5 list.
 * @param ctx
 *   Opaque context passed to the match/create/clone callbacks.
 *
 * @return
 *   The local (per-lcore) entry with a reference taken, or NULL on error
 *   (rte_errno set to ENOTSUP when called from a non-EAL thread, or the
 *   create/clone callback failed).
 */
struct mlx5_list_entry *
mlx5_list_register(struct mlx5_list *list, void *ctx)
{
	struct mlx5_list_entry *entry, *local_entry;
	volatile uint32_t prev_gen_cnt = 0;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	MLX5_ASSERT(list);
	MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
	if (unlikely(lcore_index == -1)) {
		/* Non-EAL thread: no cache slot exists for it. */
		rte_errno = ENOTSUP;
		return NULL;
	}
	/* 0. Free entries that was invalidated by other lcores. */
	__list_cache_clean(list, lcore_index);
	/* 1. Lookup in local cache. */
	local_entry = __list_lookup(list, lcore_index, ctx, true);
	if (local_entry)
		return local_entry;
	/* 2. Lookup with read lock on global list, reuse if found. */
	rte_rwlock_read_lock(&list->lock);
	entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);
	if (likely(entry)) {
		rte_rwlock_read_unlock(&list->lock);
		return mlx5_list_cache_insert(list, lcore_index, entry, ctx);
	}
	/* Snapshot the generation count while still holding the read lock. */
	prev_gen_cnt = list->gen_cnt;
	rte_rwlock_read_unlock(&list->lock);
	/* 3. Prepare new entry for global list and for cache. */
	entry = list->cb_create(list, entry, ctx);
	if (unlikely(!entry))
		return NULL;
	local_entry = list->cb_clone(list, entry, ctx);
	if (unlikely(!local_entry)) {
		list->cb_remove(list, entry);
		return NULL;
	}
	entry->ref_cnt = 1u;
	local_entry->ref_cnt = 1u;
	local_entry->gentry = entry;
	local_entry->lcore_idx = (uint32_t)lcore_index;
	rte_rwlock_write_lock(&list->lock);
	/* 4. Make sure the same entry was not created before the write lock. */
	if (unlikely(prev_gen_cnt != list->gen_cnt)) {
		struct mlx5_list_entry *oentry = __list_lookup(list,
							       RTE_MAX_LCORE,
							       ctx, true);

		if (unlikely(oentry)) {
			/* 4.5. Found real race!!, reuse the old entry. */
			rte_rwlock_write_unlock(&list->lock);
			list->cb_remove(list, entry);
			list->cb_clone_free(list, local_entry);
			return mlx5_list_cache_insert(list, lcore_index, oentry,
						      ctx);
		}
	}
	/* 5. Update lists. */
	LIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry, next);
	list->gen_cnt++;
	rte_rwlock_write_unlock(&list->lock);
	/* Local cache is only touched by its own lcore: no lock needed. */
	LIST_INSERT_HEAD(&list->cache[lcore_index].h, local_entry, next);
	__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", list->name,
		(void *)entry, entry->ref_cnt);
	return local_entry;
}
196*9c373c52SSuanming Mou 
/**
 * Release one reference on a local entry and, when both the local and the
 * global reference counts drop to zero, remove and free the global entry.
 *
 * @param list
 *   Pointer to the mlx5 list.
 * @param entry
 *   Local (per-lcore) entry previously returned by mlx5_list_register().
 *
 * @return
 *   1 if the entry (or its global twin) is still referenced, 0 if the
 *   global entry was removed or the local release was deferred.
 */
int
mlx5_list_unregister(struct mlx5_list *list,
		      struct mlx5_list_entry *entry)
{
	struct mlx5_list_entry *gentry = entry->gentry;
	int lcore_idx;

	if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	lcore_idx = rte_lcore_index(rte_lcore_id());
	MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
	if (entry->lcore_idx == (uint32_t)lcore_idx) {
		/* Owning lcore: free the local clone immediately. */
		LIST_REMOVE(entry, next);
		list->cb_clone_free(list, entry);
	} else if (likely(lcore_idx != -1)) {
		/*
		 * Another lcore owns the clone: defer its reclamation to the
		 * owner's next __list_cache_clean() via the invalid counter.
		 */
		__atomic_add_fetch(&list->cache[entry->lcore_idx].inv_cnt, 1,
				   __ATOMIC_RELAXED);
	} else {
		/* Non-EAL thread: nothing more can be done safely here. */
		return 0;
	}
	if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	rte_rwlock_write_lock(&list->lock);
	/* Re-check under the lock: a racing register may have revived it. */
	if (likely(gentry->ref_cnt == 0)) {
		LIST_REMOVE(gentry, next);
		rte_rwlock_write_unlock(&list->lock);
		list->cb_remove(list, gentry);
		__atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			list->name, (void *)gentry);
		return 0;
	}
	rte_rwlock_write_unlock(&list->lock);
	return 1;
}
232*9c373c52SSuanming Mou 
233*9c373c52SSuanming Mou void
234*9c373c52SSuanming Mou mlx5_list_destroy(struct mlx5_list *list)
235*9c373c52SSuanming Mou {
236*9c373c52SSuanming Mou 	struct mlx5_list_entry *entry;
237*9c373c52SSuanming Mou 	int i;
238*9c373c52SSuanming Mou 
239*9c373c52SSuanming Mou 	MLX5_ASSERT(list);
240*9c373c52SSuanming Mou 	for (i = 0; i <= RTE_MAX_LCORE; i++) {
241*9c373c52SSuanming Mou 		while (!LIST_EMPTY(&list->cache[i].h)) {
242*9c373c52SSuanming Mou 			entry = LIST_FIRST(&list->cache[i].h);
243*9c373c52SSuanming Mou 			LIST_REMOVE(entry, next);
244*9c373c52SSuanming Mou 			if (i == RTE_MAX_LCORE) {
245*9c373c52SSuanming Mou 				list->cb_remove(list, entry);
246*9c373c52SSuanming Mou 				DRV_LOG(DEBUG, "mlx5 list %s entry %p "
247*9c373c52SSuanming Mou 					"destroyed.", list->name,
248*9c373c52SSuanming Mou 					(void *)entry);
249*9c373c52SSuanming Mou 			} else {
250*9c373c52SSuanming Mou 				list->cb_clone_free(list, entry);
251*9c373c52SSuanming Mou 			}
252*9c373c52SSuanming Mou 		}
253*9c373c52SSuanming Mou 	}
254*9c373c52SSuanming Mou 	mlx5_free(list);
255*9c373c52SSuanming Mou }
256*9c373c52SSuanming Mou 
257*9c373c52SSuanming Mou uint32_t
258*9c373c52SSuanming Mou mlx5_list_get_entry_num(struct mlx5_list *list)
259*9c373c52SSuanming Mou {
260*9c373c52SSuanming Mou 	MLX5_ASSERT(list);
261*9c373c52SSuanming Mou 	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
262*9c373c52SSuanming Mou }
263*9c373c52SSuanming Mou 
26425245d5dSShiri Kuzin /********************* Hash List **********************/
26525245d5dSShiri Kuzin 
26625245d5dSShiri Kuzin static struct mlx5_hlist_entry *
26725245d5dSShiri Kuzin mlx5_hlist_default_create_cb(struct mlx5_hlist *h, uint64_t key __rte_unused,
26825245d5dSShiri Kuzin 			     void *ctx __rte_unused)
26925245d5dSShiri Kuzin {
27025245d5dSShiri Kuzin 	return mlx5_malloc(MLX5_MEM_ZERO, h->entry_sz, 0, SOCKET_ID_ANY);
27125245d5dSShiri Kuzin }
27225245d5dSShiri Kuzin 
27325245d5dSShiri Kuzin static void
27425245d5dSShiri Kuzin mlx5_hlist_default_remove_cb(struct mlx5_hlist *h __rte_unused,
27525245d5dSShiri Kuzin 			     struct mlx5_hlist_entry *entry)
27625245d5dSShiri Kuzin {
27725245d5dSShiri Kuzin 	mlx5_free(entry);
27825245d5dSShiri Kuzin }
27925245d5dSShiri Kuzin 
28025245d5dSShiri Kuzin struct mlx5_hlist *
28125245d5dSShiri Kuzin mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size,
28225245d5dSShiri Kuzin 		  uint32_t flags, mlx5_hlist_create_cb cb_create,
28325245d5dSShiri Kuzin 		  mlx5_hlist_match_cb cb_match, mlx5_hlist_remove_cb cb_remove)
28425245d5dSShiri Kuzin {
28525245d5dSShiri Kuzin 	struct mlx5_hlist *h;
28625245d5dSShiri Kuzin 	uint32_t act_size;
28725245d5dSShiri Kuzin 	uint32_t alloc_size;
28825245d5dSShiri Kuzin 	uint32_t i;
28925245d5dSShiri Kuzin 
29025245d5dSShiri Kuzin 	if (!size || !cb_match || (!cb_create ^ !cb_remove))
29125245d5dSShiri Kuzin 		return NULL;
29225245d5dSShiri Kuzin 	/* Align to the next power of 2, 32bits integer is enough now. */
29325245d5dSShiri Kuzin 	if (!rte_is_power_of_2(size)) {
29425245d5dSShiri Kuzin 		act_size = rte_align32pow2(size);
29525245d5dSShiri Kuzin 		DRV_LOG(DEBUG, "Size 0x%" PRIX32 " is not power of 2, "
29625245d5dSShiri Kuzin 			"will be aligned to 0x%" PRIX32 ".", size, act_size);
29725245d5dSShiri Kuzin 	} else {
29825245d5dSShiri Kuzin 		act_size = size;
29925245d5dSShiri Kuzin 	}
30025245d5dSShiri Kuzin 	alloc_size = sizeof(struct mlx5_hlist) +
30125245d5dSShiri Kuzin 		     sizeof(struct mlx5_hlist_bucket) * act_size;
30225245d5dSShiri Kuzin 	/* Using zmalloc, then no need to initialize the heads. */
30325245d5dSShiri Kuzin 	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
30425245d5dSShiri Kuzin 			SOCKET_ID_ANY);
30525245d5dSShiri Kuzin 	if (!h) {
30625245d5dSShiri Kuzin 		DRV_LOG(ERR, "No memory for hash list %s creation",
30725245d5dSShiri Kuzin 			name ? name : "None");
30825245d5dSShiri Kuzin 		return NULL;
30925245d5dSShiri Kuzin 	}
31025245d5dSShiri Kuzin 	if (name)
31125245d5dSShiri Kuzin 		snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name);
31225245d5dSShiri Kuzin 	h->table_sz = act_size;
31325245d5dSShiri Kuzin 	h->mask = act_size - 1;
31425245d5dSShiri Kuzin 	h->entry_sz = entry_size;
31525245d5dSShiri Kuzin 	h->direct_key = !!(flags & MLX5_HLIST_DIRECT_KEY);
31625245d5dSShiri Kuzin 	h->write_most = !!(flags & MLX5_HLIST_WRITE_MOST);
31725245d5dSShiri Kuzin 	h->cb_create = cb_create ? cb_create : mlx5_hlist_default_create_cb;
31825245d5dSShiri Kuzin 	h->cb_match = cb_match;
31925245d5dSShiri Kuzin 	h->cb_remove = cb_remove ? cb_remove : mlx5_hlist_default_remove_cb;
32025245d5dSShiri Kuzin 	for (i = 0; i < act_size; i++)
32125245d5dSShiri Kuzin 		rte_rwlock_init(&h->buckets[i].lock);
32225245d5dSShiri Kuzin 	DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.",
32325245d5dSShiri Kuzin 		h->name, act_size);
32425245d5dSShiri Kuzin 	return h;
32525245d5dSShiri Kuzin }
32625245d5dSShiri Kuzin 
32725245d5dSShiri Kuzin static struct mlx5_hlist_entry *
32825245d5dSShiri Kuzin __hlist_lookup(struct mlx5_hlist *h, uint64_t key, uint32_t idx,
32925245d5dSShiri Kuzin 	       void *ctx, bool reuse)
33025245d5dSShiri Kuzin {
33125245d5dSShiri Kuzin 	struct mlx5_hlist_head *first;
33225245d5dSShiri Kuzin 	struct mlx5_hlist_entry *node;
33325245d5dSShiri Kuzin 
33425245d5dSShiri Kuzin 	MLX5_ASSERT(h);
33525245d5dSShiri Kuzin 	first = &h->buckets[idx].head;
33625245d5dSShiri Kuzin 	LIST_FOREACH(node, first, next) {
33725245d5dSShiri Kuzin 		if (!h->cb_match(h, node, key, ctx)) {
33825245d5dSShiri Kuzin 			if (reuse) {
33925245d5dSShiri Kuzin 				__atomic_add_fetch(&node->ref_cnt, 1,
34025245d5dSShiri Kuzin 						   __ATOMIC_RELAXED);
34125245d5dSShiri Kuzin 				DRV_LOG(DEBUG, "Hash list %s entry %p "
34225245d5dSShiri Kuzin 					"reuse: %u.",
34325245d5dSShiri Kuzin 					h->name, (void *)node, node->ref_cnt);
34425245d5dSShiri Kuzin 			}
34525245d5dSShiri Kuzin 			break;
34625245d5dSShiri Kuzin 		}
34725245d5dSShiri Kuzin 	}
34825245d5dSShiri Kuzin 	return node;
34925245d5dSShiri Kuzin }
35025245d5dSShiri Kuzin 
35125245d5dSShiri Kuzin static struct mlx5_hlist_entry *
35225245d5dSShiri Kuzin hlist_lookup(struct mlx5_hlist *h, uint64_t key, uint32_t idx,
35325245d5dSShiri Kuzin 	     void *ctx, bool reuse)
35425245d5dSShiri Kuzin {
35525245d5dSShiri Kuzin 	struct mlx5_hlist_entry *node;
35625245d5dSShiri Kuzin 
35725245d5dSShiri Kuzin 	MLX5_ASSERT(h);
35825245d5dSShiri Kuzin 	rte_rwlock_read_lock(&h->buckets[idx].lock);
35925245d5dSShiri Kuzin 	node = __hlist_lookup(h, key, idx, ctx, reuse);
36025245d5dSShiri Kuzin 	rte_rwlock_read_unlock(&h->buckets[idx].lock);
36125245d5dSShiri Kuzin 	return node;
36225245d5dSShiri Kuzin }
36325245d5dSShiri Kuzin 
36425245d5dSShiri Kuzin struct mlx5_hlist_entry *
36525245d5dSShiri Kuzin mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
36625245d5dSShiri Kuzin {
36725245d5dSShiri Kuzin 	uint32_t idx;
36825245d5dSShiri Kuzin 
36925245d5dSShiri Kuzin 	if (h->direct_key)
37025245d5dSShiri Kuzin 		idx = (uint32_t)(key & h->mask);
37125245d5dSShiri Kuzin 	else
37225245d5dSShiri Kuzin 		idx = rte_hash_crc_8byte(key, 0) & h->mask;
37325245d5dSShiri Kuzin 	return hlist_lookup(h, key, idx, ctx, false);
37425245d5dSShiri Kuzin }
37525245d5dSShiri Kuzin 
37625245d5dSShiri Kuzin struct mlx5_hlist_entry*
37725245d5dSShiri Kuzin mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
37825245d5dSShiri Kuzin {
37925245d5dSShiri Kuzin 	uint32_t idx;
38025245d5dSShiri Kuzin 	struct mlx5_hlist_head *first;
38125245d5dSShiri Kuzin 	struct mlx5_hlist_bucket *b;
38225245d5dSShiri Kuzin 	struct mlx5_hlist_entry *entry;
38325245d5dSShiri Kuzin 	uint32_t prev_gen_cnt = 0;
38425245d5dSShiri Kuzin 
38525245d5dSShiri Kuzin 	if (h->direct_key)
38625245d5dSShiri Kuzin 		idx = (uint32_t)(key & h->mask);
38725245d5dSShiri Kuzin 	else
38825245d5dSShiri Kuzin 		idx = rte_hash_crc_8byte(key, 0) & h->mask;
38925245d5dSShiri Kuzin 	MLX5_ASSERT(h);
39025245d5dSShiri Kuzin 	b = &h->buckets[idx];
39125245d5dSShiri Kuzin 	/* Use write lock directly for write-most list. */
39225245d5dSShiri Kuzin 	if (!h->write_most) {
39325245d5dSShiri Kuzin 		prev_gen_cnt = __atomic_load_n(&b->gen_cnt, __ATOMIC_ACQUIRE);
39425245d5dSShiri Kuzin 		entry = hlist_lookup(h, key, idx, ctx, true);
39525245d5dSShiri Kuzin 		if (entry)
39625245d5dSShiri Kuzin 			return entry;
39725245d5dSShiri Kuzin 	}
39825245d5dSShiri Kuzin 	rte_rwlock_write_lock(&b->lock);
39925245d5dSShiri Kuzin 	/* Check if the list changed by other threads. */
40025245d5dSShiri Kuzin 	if (h->write_most ||
40125245d5dSShiri Kuzin 	    prev_gen_cnt != __atomic_load_n(&b->gen_cnt, __ATOMIC_ACQUIRE)) {
40225245d5dSShiri Kuzin 		entry = __hlist_lookup(h, key, idx, ctx, true);
40325245d5dSShiri Kuzin 		if (entry)
40425245d5dSShiri Kuzin 			goto done;
40525245d5dSShiri Kuzin 	}
40625245d5dSShiri Kuzin 	first = &b->head;
40725245d5dSShiri Kuzin 	entry = h->cb_create(h, key, ctx);
40825245d5dSShiri Kuzin 	if (!entry) {
40925245d5dSShiri Kuzin 		rte_errno = ENOMEM;
41025245d5dSShiri Kuzin 		DRV_LOG(DEBUG, "Can't allocate hash list %s entry.", h->name);
41125245d5dSShiri Kuzin 		goto done;
41225245d5dSShiri Kuzin 	}
41325245d5dSShiri Kuzin 	entry->idx = idx;
41425245d5dSShiri Kuzin 	entry->ref_cnt = 1;
41525245d5dSShiri Kuzin 	LIST_INSERT_HEAD(first, entry, next);
41625245d5dSShiri Kuzin 	__atomic_add_fetch(&b->gen_cnt, 1, __ATOMIC_ACQ_REL);
41725245d5dSShiri Kuzin 	DRV_LOG(DEBUG, "Hash list %s entry %p new: %u.",
41825245d5dSShiri Kuzin 		h->name, (void *)entry, entry->ref_cnt);
41925245d5dSShiri Kuzin done:
42025245d5dSShiri Kuzin 	rte_rwlock_write_unlock(&b->lock);
42125245d5dSShiri Kuzin 	return entry;
42225245d5dSShiri Kuzin }
42325245d5dSShiri Kuzin 
/**
 * Drop one reference on a hash list entry; remove and free it when the
 * count reaches zero. The whole operation runs under the bucket write lock.
 *
 * @param h
 *   Pointer to the hash list.
 * @param entry
 *   Entry previously returned by mlx5_hlist_register().
 *
 * @return
 *   1 if the entry is still referenced, 0 if it was removed and freed.
 */
int
mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
{
	uint32_t idx = entry->idx;

	rte_rwlock_write_lock(&h->buckets[idx].lock);
	MLX5_ASSERT(entry && entry->ref_cnt && entry->next.le_prev);
	DRV_LOG(DEBUG, "Hash list %s entry %p deref: %u.",
		h->name, (void *)entry, entry->ref_cnt);
	if (--entry->ref_cnt) {
		/* Still referenced by other users: keep it. */
		rte_rwlock_write_unlock(&h->buckets[idx].lock);
		return 1;
	}
	LIST_REMOVE(entry, next);
	/* Set to NULL to get rid of removing action for more than once. */
	entry->next.le_prev = NULL;
	h->cb_remove(h, entry);
	rte_rwlock_write_unlock(&h->buckets[idx].lock);
	DRV_LOG(DEBUG, "Hash list %s entry %p removed.",
		h->name, (void *)entry);
	return 0;
}
44625245d5dSShiri Kuzin 
44725245d5dSShiri Kuzin void
44825245d5dSShiri Kuzin mlx5_hlist_destroy(struct mlx5_hlist *h)
44925245d5dSShiri Kuzin {
45025245d5dSShiri Kuzin 	uint32_t idx;
45125245d5dSShiri Kuzin 	struct mlx5_hlist_entry *entry;
45225245d5dSShiri Kuzin 
45325245d5dSShiri Kuzin 	MLX5_ASSERT(h);
45425245d5dSShiri Kuzin 	for (idx = 0; idx < h->table_sz; ++idx) {
45525245d5dSShiri Kuzin 		/* No LIST_FOREACH_SAFE, using while instead. */
45625245d5dSShiri Kuzin 		while (!LIST_EMPTY(&h->buckets[idx].head)) {
45725245d5dSShiri Kuzin 			entry = LIST_FIRST(&h->buckets[idx].head);
45825245d5dSShiri Kuzin 			LIST_REMOVE(entry, next);
45925245d5dSShiri Kuzin 			/*
46025245d5dSShiri Kuzin 			 * The owner of whole element which contains data entry
46125245d5dSShiri Kuzin 			 * is the user, so it's the user's duty to do the clean
46225245d5dSShiri Kuzin 			 * up and the free work because someone may not put the
46325245d5dSShiri Kuzin 			 * hlist entry at the beginning(suggested to locate at
46425245d5dSShiri Kuzin 			 * the beginning). Or else the default free function
46525245d5dSShiri Kuzin 			 * will be used.
46625245d5dSShiri Kuzin 			 */
46725245d5dSShiri Kuzin 			h->cb_remove(h, entry);
46825245d5dSShiri Kuzin 		}
46925245d5dSShiri Kuzin 	}
47025245d5dSShiri Kuzin 	mlx5_free(h);
47125245d5dSShiri Kuzin }
472