xref: /dpdk/drivers/common/mlx5/mlx5_common_utils.c (revision e12a0166c80f65e35408f4715b2f3a60763c3741)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_hash_crc.h>
#include <rte_errno.h>

#include <mlx5_malloc.h>

#include "mlx5_common_utils.h"
#include "mlx5_common_log.h"

/********************* mlx5 list ************************/

static int
mlx5_list_init(struct mlx5_list_inconst *l_inconst,
	       struct mlx5_list_const *l_const,
	       struct mlx5_list_cache *gc)
{
	rte_rwlock_init(&l_inconst->lock);
	if (l_const->lcores_share) {
		l_inconst->cache[MLX5_LIST_GLOBAL] = gc;
		LIST_INIT(&l_inconst->cache[MLX5_LIST_GLOBAL]->h);
	}
	return 0;
}
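
/*
 * Cache layout note (an assumption based on the usual definitions in
 * mlx5_common_utils.h, which is not shown here): each list keeps one
 * cache slot per lcore plus two special slots,
 *
 *	cache[0 .. RTE_MAX_LCORE - 1]	per-lcore caches, owner-local
 *	cache[MLX5_LIST_NLCORE]		shared by non-EAL threads,
 *					serialized by l_const->lcore_lock
 *	cache[MLX5_LIST_GLOBAL]		global list, protected by
 *					l_inconst->lock
 *
 * mlx5_list_init() only wires up the global slot; the per-lcore slots are
 * allocated lazily on first registration from that lcore.
 */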

struct mlx5_list *
mlx5_list_create(const char *name, void *ctx, bool lcores_share,
		 mlx5_list_create_cb cb_create,
		 mlx5_list_match_cb cb_match,
		 mlx5_list_remove_cb cb_remove,
		 mlx5_list_clone_cb cb_clone,
		 mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_list *list;
	struct mlx5_list_cache *gc = NULL;

	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
	    !cb_clone_free) {
		rte_errno = EINVAL;
		return NULL;
	}
	list = mlx5_malloc(MLX5_MEM_ZERO,
			   sizeof(*list) + (lcores_share ? sizeof(*gc) : 0),
			   0, SOCKET_ID_ANY);
	if (!list)
		return NULL;
	if (name)
		snprintf(list->l_const.name,
			 sizeof(list->l_const.name), "%s", name);
	list->l_const.ctx = ctx;
	list->l_const.lcores_share = lcores_share;
	list->l_const.cb_create = cb_create;
	list->l_const.cb_match = cb_match;
	list->l_const.cb_remove = cb_remove;
	list->l_const.cb_clone = cb_clone;
	list->l_const.cb_clone_free = cb_clone_free;
	rte_spinlock_init(&list->l_const.lcore_lock);
	if (lcores_share)
		gc = (struct mlx5_list_cache *)(list + 1);
	if (mlx5_list_init(&list->l_inconst, &list->l_const, gc) != 0) {
		mlx5_free(list);
		return NULL;
	}
	DRV_LOG(DEBUG, "mlx5 list %s was created.", name ? name : "None");
	return list;
}
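
/*
 * Usage sketch (illustrative only; the demo_* callbacks and the key type
 * are hypothetical, not part of this file):
 *
 *	struct mlx5_list *list;
 *	struct mlx5_list_entry *entry;
 *
 *	list = mlx5_list_create("demo", priv, true,
 *				demo_create, demo_match, demo_remove,
 *				demo_clone, demo_clone_free);
 *	if (list == NULL)
 *		return -rte_errno;
 *	entry = mlx5_list_register(list, &key);	  // create or reuse
 *	...
 *	mlx5_list_unregister(list, entry);	  // drop one reference
 *	mlx5_list_destroy(list);
 */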

static struct mlx5_list_entry *
__list_lookup(struct mlx5_list_inconst *l_inconst,
	      struct mlx5_list_const *l_const,
	      int lcore_index, void *ctx, bool reuse)
{
	struct mlx5_list_entry *entry =
				LIST_FIRST(&l_inconst->cache[lcore_index]->h);
	uint32_t ret = 0; /* Zero-init: not every path below assigns it. */

	while (entry != NULL) {
		if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
			if (reuse) {
				ret = rte_atomic_fetch_add_explicit(&entry->ref_cnt, 1,
							 rte_memory_order_relaxed);
				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
					l_const->name, (void *)entry,
					entry->ref_cnt);
			} else if (lcore_index < MLX5_LIST_GLOBAL) {
				ret = rte_atomic_load_explicit(&entry->ref_cnt,
						      rte_memory_order_relaxed);
			}
			if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))
				return entry;
			if (reuse && ret == 0)
				entry->ref_cnt--; /* Invalid entry. */
		}
		entry = LIST_NEXT(entry, next);
	}
	return NULL;
}
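
/*
 * The match callback follows memcmp() semantics: return 0 on a match,
 * non-zero otherwise. A minimal sketch, assuming a hypothetical
 * demo_entry wrapper around struct mlx5_list_entry:
 *
 *	static int
 *	demo_match(void *list_ctx, struct mlx5_list_entry *entry, void *ctx)
 *	{
 *		struct demo_entry *e =
 *			container_of(entry, struct demo_entry, entry);
 *
 *		return memcmp(&e->key, ctx, sizeof(e->key));
 *	}
 */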

static inline struct mlx5_list_entry *
_mlx5_list_lookup(struct mlx5_list_inconst *l_inconst,
		  struct mlx5_list_const *l_const, void *ctx)
{
	struct mlx5_list_entry *entry = NULL;
	int i;

	rte_rwlock_read_lock(&l_inconst->lock);
	for (i = 0; i < MLX5_LIST_GLOBAL; i++) {
		if (!l_inconst->cache[i])
			continue;
		entry = __list_lookup(l_inconst, l_const, i, ctx, false);
		if (entry)
			break;
	}
	rte_rwlock_read_unlock(&l_inconst->lock);
	return entry;
}

struct mlx5_list_entry *
mlx5_list_lookup(struct mlx5_list *list, void *ctx)
{
	return _mlx5_list_lookup(&list->l_inconst, &list->l_const, ctx);
}

static struct mlx5_list_entry *
mlx5_list_cache_insert(struct mlx5_list_inconst *l_inconst,
		       struct mlx5_list_const *l_const, int lcore_index,
		       struct mlx5_list_entry *gentry, void *ctx)
{
	struct mlx5_list_entry *lentry =
			l_const->cb_clone(l_const->ctx, gentry, ctx);

	if (unlikely(!lentry))
		return NULL;
	lentry->ref_cnt = 1u;
	lentry->gentry = gentry;
	lentry->lcore_idx = (uint32_t)lcore_index;
	LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, lentry, next);
	return lentry;
}
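
/*
 * The clone callbacks manage the per-lcore shadow of a global entry:
 * cb_clone allocates a local copy, cb_clone_free releases it. A hedged
 * sketch, reusing the hypothetical demo_entry type (demo_key_t is equally
 * hypothetical):
 *
 *	static struct mlx5_list_entry *
 *	demo_clone(void *list_ctx, struct mlx5_list_entry *gentry, void *ctx)
 *	{
 *		struct demo_entry *e = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*e),
 *						   0, SOCKET_ID_ANY);
 *
 *		if (e)
 *			e->key = *(demo_key_t *)ctx;  // local copy of the key
 *		return e ? &e->entry : NULL;
 *	}
 *
 *	static void
 *	demo_clone_free(void *list_ctx, struct mlx5_list_entry *entry)
 *	{
 *		mlx5_free(container_of(entry, struct demo_entry, entry));
 *	}
 */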

static void
__list_cache_clean(struct mlx5_list_inconst *l_inconst,
		   struct mlx5_list_const *l_const,
		   int lcore_index)
{
	struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
	uint32_t inv_cnt = rte_atomic_exchange_explicit(&c->inv_cnt, 0,
					       rte_memory_order_relaxed);

	while (inv_cnt != 0 && entry != NULL) {
		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);

		if (rte_atomic_load_explicit(&entry->ref_cnt, rte_memory_order_relaxed) == 0) {
			LIST_REMOVE(entry, next);
			if (l_const->lcores_share)
				l_const->cb_clone_free(l_const->ctx, entry);
			else
				l_const->cb_remove(l_const->ctx, entry);
			inv_cnt--;
		}
		entry = nentry;
	}
}
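
/*
 * Deferred-reclamation scenario this function resolves: lcore B drops the
 * last reference to an entry that lives in lcore A's cache. B must not
 * unlink entries from A's list, so it only increments A's inv_cnt (see
 * _mlx5_list_unregister() below); the entry is actually unlinked and
 * freed here, the next time A itself enters _mlx5_list_register().
 */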

static inline struct mlx5_list_entry *
_mlx5_list_register(struct mlx5_list_inconst *l_inconst,
		    struct mlx5_list_const *l_const,
		    void *ctx, int lcore_index)
{
	struct mlx5_list_entry *entry = NULL, *local_entry;
	volatile uint32_t prev_gen_cnt = 0;

	MLX5_ASSERT(l_inconst);
	if (unlikely(!l_inconst->cache[lcore_index])) {
		l_inconst->cache[lcore_index] = mlx5_malloc(0,
					sizeof(struct mlx5_list_cache),
					RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!l_inconst->cache[lcore_index]) {
			rte_errno = ENOMEM;
			return NULL;
		}
		l_inconst->cache[lcore_index]->inv_cnt = 0;
		LIST_INIT(&l_inconst->cache[lcore_index]->h);
	}
	/* 0. Free entries that were invalidated by other lcores. */
	__list_cache_clean(l_inconst, l_const, lcore_index);
	/* 1. Lookup in local cache. */
	local_entry = __list_lookup(l_inconst, l_const, lcore_index, ctx, true);
	if (local_entry)
		return local_entry;
	if (l_const->lcores_share) {
		/* 2. Lookup with read lock on global list, reuse if found. */
		rte_rwlock_read_lock(&l_inconst->lock);
		entry = __list_lookup(l_inconst, l_const, MLX5_LIST_GLOBAL,
				      ctx, true);
		if (likely(entry)) {
			rte_rwlock_read_unlock(&l_inconst->lock);
			return mlx5_list_cache_insert(l_inconst, l_const,
						      lcore_index,
						      entry, ctx);
		}
		prev_gen_cnt = l_inconst->gen_cnt;
		rte_rwlock_read_unlock(&l_inconst->lock);
	}
	/* 3. Prepare a new entry for the global list and for the cache. */
	entry = l_const->cb_create(l_const->ctx, ctx);
	if (unlikely(!entry))
		return NULL;
	entry->ref_cnt = 1u;
	if (!l_const->lcores_share) {
		entry->lcore_idx = (uint32_t)lcore_index;
		LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
				 entry, next);
		rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
		DRV_LOG(DEBUG, "mlx5 list %s c%d entry %p new: %u.",
			l_const->name, lcore_index,
			(void *)entry, entry->ref_cnt);
		return entry;
	}
	local_entry = l_const->cb_clone(l_const->ctx, entry, ctx);
	if (unlikely(!local_entry)) {
		l_const->cb_remove(l_const->ctx, entry);
		return NULL;
	}
	local_entry->ref_cnt = 1u;
	local_entry->gentry = entry;
	local_entry->lcore_idx = (uint32_t)lcore_index;
	rte_rwlock_write_lock(&l_inconst->lock);
	/* 4. Make sure the same entry was not created before taking the write lock. */
	if (unlikely(prev_gen_cnt != l_inconst->gen_cnt)) {
		struct mlx5_list_entry *oentry = __list_lookup(l_inconst,
							       l_const,
							       MLX5_LIST_GLOBAL,
							       ctx, true);

		if (unlikely(oentry)) {
			/* 4.5. A real race happened; reuse the old entry. */
			rte_rwlock_write_unlock(&l_inconst->lock);
			l_const->cb_remove(l_const->ctx, entry);
			l_const->cb_clone_free(l_const->ctx, local_entry);
			return mlx5_list_cache_insert(l_inconst, l_const,
						      lcore_index,
						      oentry, ctx);
		}
	}
	/* 5. Update lists. */
	LIST_INSERT_HEAD(&l_inconst->cache[MLX5_LIST_GLOBAL]->h, entry, next);
	l_inconst->gen_cnt++;
	rte_rwlock_write_unlock(&l_inconst->lock);
	LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
	rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
		(void *)entry, entry->ref_cnt);
	return local_entry;
}
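
/*
 * Concurrency note on the slow path above: prev_gen_cnt snapshots the
 * generation counter under the read lock. If another lcore inserted a
 * global entry between dropping the read lock and taking the write lock,
 * gen_cnt differs and the global list is re-scanned, so a racing
 * duplicate is reused rather than double-inserted. For example, two
 * lcores registering the same key concurrently end up sharing a single
 * global entry, each holding its own local clone and one reference.
 */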

struct mlx5_list_entry *
mlx5_list_register(struct mlx5_list *list, void *ctx)
{
	struct mlx5_list_entry *entry;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	if (unlikely(lcore_index == -1)) {
		lcore_index = MLX5_LIST_NLCORE;
		rte_spinlock_lock(&list->l_const.lcore_lock);
	}
	entry = _mlx5_list_register(&list->l_inconst, &list->l_const, ctx,
				    lcore_index);
	if (unlikely(lcore_index == MLX5_LIST_NLCORE))
		rte_spinlock_unlock(&list->l_const.lcore_lock);
	return entry;
}
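
/*
 * rte_lcore_index() returns -1 for threads that are not EAL lcores, e.g.
 * a control thread calling into the PMD. All such callers share the
 * MLX5_LIST_NLCORE cache slot, with the spinlock serializing them:
 *
 *	// From a non-EAL pthread this still works, only slower:
 *	entry = mlx5_list_register(list, &key);
 */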

static inline int
_mlx5_list_unregister(struct mlx5_list_inconst *l_inconst,
		      struct mlx5_list_const *l_const,
		      struct mlx5_list_entry *entry,
		      int lcore_idx)
{
	struct mlx5_list_entry *gentry = entry->gentry;

	if (rte_atomic_fetch_sub_explicit(&entry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
		return 1;
	if (entry->lcore_idx == (uint32_t)lcore_idx) {
		LIST_REMOVE(entry, next);
		if (l_const->lcores_share)
			l_const->cb_clone_free(l_const->ctx, entry);
		else
			l_const->cb_remove(l_const->ctx, entry);
	} else {
		rte_atomic_fetch_add_explicit(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
				   1, rte_memory_order_relaxed);
	}
	if (!l_const->lcores_share) {
		rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			l_const->name, (void *)entry);
		return 0;
	}
	if (rte_atomic_fetch_sub_explicit(&gentry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
		return 1;
	rte_rwlock_write_lock(&l_inconst->lock);
	if (likely(gentry->ref_cnt == 0)) {
		LIST_REMOVE(gentry, next);
		rte_rwlock_write_unlock(&l_inconst->lock);
		l_const->cb_remove(l_const->ctx, gentry);
		rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			l_const->name, (void *)gentry);
		return 0;
	}
	rte_rwlock_write_unlock(&l_inconst->lock);
	return 1;
}
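
/*
 * Two-level reference counting recap: a user holds one reference on its
 * local clone, and each live clone holds one reference on the global
 * entry. Release therefore happens in two stages: the clone first (freed
 * in place by its owner, or flagged through inv_cnt for deferred cleanup),
 * then the global entry once its own count drops to zero, re-checked
 * under the write lock to exclude a concurrent reuse by __list_lookup().
 */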

int
mlx5_list_unregister(struct mlx5_list *list,
		     struct mlx5_list_entry *entry)
{
	int ret;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	if (unlikely(lcore_index == -1)) {
		lcore_index = MLX5_LIST_NLCORE;
		rte_spinlock_lock(&list->l_const.lcore_lock);
	}
	ret = _mlx5_list_unregister(&list->l_inconst, &list->l_const, entry,
				    lcore_index);
	if (unlikely(lcore_index == MLX5_LIST_NLCORE))
		rte_spinlock_unlock(&list->l_const.lcore_lock);
	return ret;
}

static void
mlx5_list_uninit(struct mlx5_list_inconst *l_inconst,
		 struct mlx5_list_const *l_const)
{
	struct mlx5_list_entry *entry;
	int i;

	MLX5_ASSERT(l_inconst);
	for (i = 0; i < MLX5_LIST_MAX; i++) {
		if (!l_inconst->cache[i])
			continue;
		while (!LIST_EMPTY(&l_inconst->cache[i]->h)) {
			entry = LIST_FIRST(&l_inconst->cache[i]->h);
			LIST_REMOVE(entry, next);
			if (i == MLX5_LIST_GLOBAL) {
				l_const->cb_remove(l_const->ctx, entry);
				DRV_LOG(DEBUG, "mlx5 list %s entry %p "
					"destroyed.", l_const->name,
					(void *)entry);
			} else {
				l_const->cb_clone_free(l_const->ctx, entry);
			}
		}
		if (i != MLX5_LIST_GLOBAL)
			mlx5_free(l_inconst->cache[i]);
	}
}

void
mlx5_list_destroy(struct mlx5_list *list)
{
	mlx5_list_uninit(&list->l_inconst, &list->l_const);
	mlx5_free(list);
}

uint32_t
mlx5_list_get_entry_num(struct mlx5_list *list)
{
	MLX5_ASSERT(list);
	return rte_atomic_load_explicit(&list->l_inconst.count, rte_memory_order_relaxed);
}

/********************* Hash List **********************/

struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,
		  bool lcores_share, void *ctx, mlx5_list_create_cb cb_create,
		  mlx5_list_match_cb cb_match,
		  mlx5_list_remove_cb cb_remove,
		  mlx5_list_clone_cb cb_clone,
		  mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_hlist *h;
	struct mlx5_list_cache *gc;
	uint32_t act_size;
	uint32_t alloc_size;
	uint32_t i;

	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
	    !cb_clone_free) {
		rte_errno = EINVAL;
		return NULL;
	}
	/* Align to the next power of 2; a 32-bit integer is enough for now. */
	if (!rte_is_power_of_2(size)) {
		act_size = rte_align32pow2(size);
		DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not a power of 2, "
			"it will be aligned to 0x%" PRIX32 ".", size, act_size);
	} else {
		act_size = size;
	}
	alloc_size = sizeof(struct mlx5_hlist) +
		     sizeof(struct mlx5_hlist_bucket) * act_size;
	if (lcores_share)
		alloc_size += sizeof(struct mlx5_list_cache) * act_size;
	/* Zeroed allocation, so there is no need to initialize the heads. */
	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
			SOCKET_ID_ANY);
	if (!h) {
		DRV_LOG(ERR, "No memory for hash list %s creation",
			name ? name : "None");
		return NULL;
	}
	if (name)
		snprintf(h->l_const.name, sizeof(h->l_const.name), "%s", name);
	h->l_const.ctx = ctx;
	h->l_const.lcores_share = lcores_share;
	h->l_const.cb_create = cb_create;
	h->l_const.cb_match = cb_match;
	h->l_const.cb_remove = cb_remove;
	h->l_const.cb_clone = cb_clone;
	h->l_const.cb_clone_free = cb_clone_free;
	rte_spinlock_init(&h->l_const.lcore_lock);
	h->mask = act_size - 1;
	h->direct_key = direct_key;
	gc = (struct mlx5_list_cache *)&h->buckets[act_size];
	for (i = 0; i < act_size; i++) {
		if (mlx5_list_init(&h->buckets[i].l, &h->l_const,
		    lcores_share ? &gc[i] : NULL) != 0) {
			mlx5_free(h);
			return NULL;
		}
	}
	DRV_LOG(DEBUG, "Hash list %s with size 0x%" PRIX32 " was created.",
		name ? name : "None", act_size);
	return h;
}
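
/*
 * Usage sketch (illustrative; the demo_* callbacks are the same
 * hypothetical ones as in the mlx5_list example above). The size is
 * rounded up to a power of two so that masking can replace a modulo:
 *
 *	h = mlx5_hlist_create("demo_hash", 64, false, true, priv,
 *			      demo_create, demo_match, demo_remove,
 *			      demo_clone, demo_clone_free);
 *	entry = mlx5_hlist_register(h, key64, &key_ctx);
 *	...
 *	mlx5_hlist_unregister(h, entry);
 *	mlx5_hlist_destroy(h);
 */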

struct mlx5_list_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;

	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	return _mlx5_list_lookup(&h->buckets[idx].l, &h->l_const, ctx);
}
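
/*
 * Bucket selection example: with size 64 the mask is 0x3f. In direct-key
 * mode the low bits of the key index the bucket directly (cheap, but the
 * caller must ensure they are well distributed); otherwise the key is
 * first mixed through a CRC32-based hash:
 *
 *	idx = rte_hash_crc_8byte(0xdeadbeefcafef00dULL, 0) & 0x3f;
 */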

struct mlx5_list_entry *
mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;
	struct mlx5_list_entry *entry;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	if (unlikely(lcore_index == -1)) {
		lcore_index = MLX5_LIST_NLCORE;
		rte_spinlock_lock(&h->l_const.lcore_lock);
	}
	entry = _mlx5_list_register(&h->buckets[idx].l, &h->l_const, ctx,
				    lcore_index);
	if (likely(entry)) {
		if (h->l_const.lcores_share)
			entry->gentry->bucket_idx = idx;
		else
			entry->bucket_idx = idx;
	}
	if (unlikely(lcore_index == MLX5_LIST_NLCORE))
		rte_spinlock_unlock(&h->l_const.lcore_lock);
	return entry;
}

int
mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_list_entry *entry)
{
	int lcore_index = rte_lcore_index(rte_lcore_id());
	int ret;
	uint32_t idx = h->l_const.lcores_share ? entry->gentry->bucket_idx :
						 entry->bucket_idx;

	if (unlikely(lcore_index == -1)) {
		lcore_index = MLX5_LIST_NLCORE;
		rte_spinlock_lock(&h->l_const.lcore_lock);
	}
	ret = _mlx5_list_unregister(&h->buckets[idx].l, &h->l_const, entry,
				    lcore_index);
	if (unlikely(lcore_index == MLX5_LIST_NLCORE))
		rte_spinlock_unlock(&h->l_const.lcore_lock);
	return ret;
}

void
mlx5_hlist_destroy(struct mlx5_hlist *h)
{
	uint32_t i;

	for (i = 0; i <= h->mask; i++)
		mlx5_list_uninit(&h->buckets[i].l, &h->l_const);
	mlx5_free(h);
}
517