/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_hash_crc.h>
#include <rte_errno.h>

#include <mlx5_malloc.h>

#include "mlx5_common_utils.h"
#include "mlx5_common_log.h"

/********************* mlx5 list ************************/

static int
mlx5_list_init(struct mlx5_list_inconst *l_inconst,
               struct mlx5_list_const *l_const,
               struct mlx5_list_cache *gc)
{
        rte_rwlock_init(&l_inconst->lock);
        if (l_const->lcores_share) {
                l_inconst->cache[MLX5_LIST_GLOBAL] = gc;
                LIST_INIT(&l_inconst->cache[MLX5_LIST_GLOBAL]->h);
        }
        return 0;
}

struct mlx5_list *
mlx5_list_create(const char *name, void *ctx, bool lcores_share,
                 mlx5_list_create_cb cb_create,
                 mlx5_list_match_cb cb_match,
                 mlx5_list_remove_cb cb_remove,
                 mlx5_list_clone_cb cb_clone,
                 mlx5_list_clone_free_cb cb_clone_free)
{
        struct mlx5_list *list;
        struct mlx5_list_cache *gc = NULL;

        if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
            !cb_clone_free) {
                rte_errno = EINVAL;
                return NULL;
        }
        list = mlx5_malloc(MLX5_MEM_ZERO,
                           sizeof(*list) + (lcores_share ? sizeof(*gc) : 0),
                           0, SOCKET_ID_ANY);
        if (!list)
                return NULL;
        if (name)
                snprintf(list->l_const.name,
                         sizeof(list->l_const.name), "%s", name);
        list->l_const.ctx = ctx;
        list->l_const.lcores_share = lcores_share;
        list->l_const.cb_create = cb_create;
        list->l_const.cb_match = cb_match;
        list->l_const.cb_remove = cb_remove;
        list->l_const.cb_clone = cb_clone;
        list->l_const.cb_clone_free = cb_clone_free;
        rte_spinlock_init(&list->l_const.lcore_lock);
        if (lcores_share)
                gc = (struct mlx5_list_cache *)(list + 1);
        if (mlx5_list_init(&list->l_inconst, &list->l_const, gc) != 0) {
                mlx5_free(list);
                return NULL;
        }
        DRV_LOG(DEBUG, "mlx5 list %s was created.", name);
        return list;
}
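
/*
 * Illustrative usage sketch (not part of the driver): the entry type and
 * callbacks below are hypothetical and only show the contract this file
 * expects from callers; the callback typedefs are declared in
 * mlx5_common_utils.h, included above.
 *
 *     struct my_entry {
 *             struct mlx5_list_entry le; // list linkage, embedded first
 *             uint32_t key;
 *     };
 *
 *     static struct mlx5_list_entry *
 *     my_create(void *tool_ctx, void *cb_ctx)
 *     {
 *             struct my_entry *e = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*e),
 *                                              0, SOCKET_ID_ANY);
 *
 *             if (e)
 *                     e->key = *(uint32_t *)cb_ctx;
 *             return e ? &e->le : NULL;
 *     }
 *
 *     static int
 *     my_match(void *tool_ctx, struct mlx5_list_entry *entry, void *cb_ctx)
 *     {
 *             // Return 0 on match, as __list_lookup() below expects.
 *             return ((struct my_entry *)entry)->key != *(uint32_t *)cb_ctx;
 *     }
 *
 *     // my_remove/my_clone/my_clone_free release or copy an entry likewise.
 *
 *     struct mlx5_list *l = mlx5_list_create("example", NULL, true,
 *                                            my_create, my_match, my_remove,
 *                                            my_clone, my_clone_free);
 *     uint32_t key = 5;
 *     struct mlx5_list_entry *e = mlx5_list_register(l, &key);
 *     ...
 *     mlx5_list_unregister(l, e);
 *     mlx5_list_destroy(l);
 */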

static struct mlx5_list_entry *
__list_lookup(struct mlx5_list_inconst *l_inconst,
              struct mlx5_list_const *l_const,
              int lcore_index, void *ctx, bool reuse)
{
        struct mlx5_list_entry *entry =
                LIST_FIRST(&l_inconst->cache[lcore_index]->h);
        uint32_t ret;

        while (entry != NULL) {
                if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
                        if (reuse) {
                                ret = __atomic_add_fetch(&entry->ref_cnt, 1,
                                                         __ATOMIC_RELAXED) - 1;
                                DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
                                        l_const->name, (void *)entry,
                                        entry->ref_cnt);
                        } else if (lcore_index < MLX5_LIST_GLOBAL) {
                                ret = __atomic_load_n(&entry->ref_cnt,
                                                      __ATOMIC_RELAXED);
                        }
                        if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))
                                return entry;
                        if (reuse && ret == 0)
                                entry->ref_cnt--; /* Invalid entry. */
                }
                entry = LIST_NEXT(entry, next);
        }
        return NULL;
}

static inline struct mlx5_list_entry *
_mlx5_list_lookup(struct mlx5_list_inconst *l_inconst,
                  struct mlx5_list_const *l_const, void *ctx)
{
        struct mlx5_list_entry *entry = NULL;
        int i;

        rte_rwlock_read_lock(&l_inconst->lock);
        for (i = 0; i < MLX5_LIST_GLOBAL; i++) {
                if (!l_inconst->cache[i])
                        continue;
                entry = __list_lookup(l_inconst, l_const, i, ctx, false);
                if (entry)
                        break;
        }
        rte_rwlock_read_unlock(&l_inconst->lock);
        return entry;
}

struct mlx5_list_entry *
mlx5_list_lookup(struct mlx5_list *list, void *ctx)
{
        return _mlx5_list_lookup(&list->l_inconst, &list->l_const, ctx);
}
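
/*
 * Clone a global entry into the calling lcore's local cache so that later
 * lookups on this lcore can hit without taking the global read lock.
 */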

static struct mlx5_list_entry *
mlx5_list_cache_insert(struct mlx5_list_inconst *l_inconst,
                       struct mlx5_list_const *l_const, int lcore_index,
                       struct mlx5_list_entry *gentry, void *ctx)
{
        struct mlx5_list_entry *lentry =
                l_const->cb_clone(l_const->ctx, gentry, ctx);

        if (unlikely(!lentry))
                return NULL;
        lentry->ref_cnt = 1u;
        lentry->gentry = gentry;
        lentry->lcore_idx = (uint32_t)lcore_index;
        LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, lentry, next);
        return lentry;
}

static void
__list_cache_clean(struct mlx5_list_inconst *l_inconst,
                   struct mlx5_list_const *l_const,
                   int lcore_index)
{
        struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
        struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
        uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
                                               __ATOMIC_RELAXED);

        while (inv_cnt != 0 && entry != NULL) {
                struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);

                if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
                        LIST_REMOVE(entry, next);
                        if (l_const->lcores_share)
                                l_const->cb_clone_free(l_const->ctx, entry);
                        else
                                l_const->cb_remove(l_const->ctx, entry);
                        inv_cnt--;
                }
                entry = nentry;
        }
}
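
/*
 * Core registration path: free entries invalidated by other lcores, look up
 * the per-lcore cache, then (when entries are shared) the global list under
 * a read lock, and create a new entry only when both miss. A generation
 * counter detects the race where another lcore inserted the same entry
 * between dropping the read lock and taking the write lock.
 */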

static inline struct mlx5_list_entry *
_mlx5_list_register(struct mlx5_list_inconst *l_inconst,
                    struct mlx5_list_const *l_const,
                    void *ctx, int lcore_index)
{
        struct mlx5_list_entry *entry = NULL, *local_entry;
        volatile uint32_t prev_gen_cnt = 0;

        MLX5_ASSERT(l_inconst);
        if (unlikely(!l_inconst->cache[lcore_index])) {
                l_inconst->cache[lcore_index] = mlx5_malloc(0,
                                        sizeof(struct mlx5_list_cache),
                                        RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                if (!l_inconst->cache[lcore_index]) {
                        rte_errno = ENOMEM;
                        return NULL;
                }
                l_inconst->cache[lcore_index]->inv_cnt = 0;
                LIST_INIT(&l_inconst->cache[lcore_index]->h);
        }
        /* 0. Free entries that were invalidated by other lcores. */
        __list_cache_clean(l_inconst, l_const, lcore_index);
        /* 1. Lookup in local cache. */
        local_entry = __list_lookup(l_inconst, l_const, lcore_index, ctx, true);
        if (local_entry)
                return local_entry;
        if (l_const->lcores_share) {
                /* 2. Lookup with read lock on global list, reuse if found. */
                rte_rwlock_read_lock(&l_inconst->lock);
                entry = __list_lookup(l_inconst, l_const, MLX5_LIST_GLOBAL,
                                      ctx, true);
                if (likely(entry)) {
                        rte_rwlock_read_unlock(&l_inconst->lock);
                        return mlx5_list_cache_insert(l_inconst, l_const,
                                                      lcore_index,
                                                      entry, ctx);
                }
                prev_gen_cnt = l_inconst->gen_cnt;
                rte_rwlock_read_unlock(&l_inconst->lock);
        }
        /* 3. Prepare new entry for global list and for cache. */
        entry = l_const->cb_create(l_const->ctx, ctx);
        if (unlikely(!entry))
                return NULL;
        entry->ref_cnt = 1u;
        if (!l_const->lcores_share) {
                entry->lcore_idx = (uint32_t)lcore_index;
                LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
                                 entry, next);
                __atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
                DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
                        l_const->name, lcore_index,
                        (void *)entry, entry->ref_cnt);
                return entry;
        }
        local_entry = l_const->cb_clone(l_const->ctx, entry, ctx);
        if (unlikely(!local_entry)) {
                l_const->cb_remove(l_const->ctx, entry);
                return NULL;
        }
        local_entry->ref_cnt = 1u;
        local_entry->gentry = entry;
        local_entry->lcore_idx = (uint32_t)lcore_index;
        rte_rwlock_write_lock(&l_inconst->lock);
        /* 4. Make sure the same entry was not created before the write lock. */
        if (unlikely(prev_gen_cnt != l_inconst->gen_cnt)) {
                struct mlx5_list_entry *oentry = __list_lookup(l_inconst,
                                                               l_const,
                                                               MLX5_LIST_GLOBAL,
                                                               ctx, true);

                if (unlikely(oentry)) {
                        /* 4.5. Found a real race, reuse the old entry. */
                        rte_rwlock_write_unlock(&l_inconst->lock);
                        l_const->cb_remove(l_const->ctx, entry);
                        l_const->cb_clone_free(l_const->ctx, local_entry);
                        return mlx5_list_cache_insert(l_inconst, l_const,
                                                      lcore_index,
                                                      oentry, ctx);
                }
        }
        /* 5. Update lists. */
        LIST_INSERT_HEAD(&l_inconst->cache[MLX5_LIST_GLOBAL]->h, entry, next);
        l_inconst->gen_cnt++;
        rte_rwlock_write_unlock(&l_inconst->lock);
        LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
        __atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
        DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
                (void *)entry, entry->ref_cnt);
        return local_entry;
}

struct mlx5_list_entry *
mlx5_list_register(struct mlx5_list *list, void *ctx)
{
        struct mlx5_list_entry *entry;
        int lcore_index = rte_lcore_index(rte_lcore_id());

        if (unlikely(lcore_index == -1)) {
                lcore_index = MLX5_LIST_NLCORE;
                rte_spinlock_lock(&list->l_const.lcore_lock);
        }
        entry = _mlx5_list_register(&list->l_inconst, &list->l_const, ctx,
                                    lcore_index);
        if (unlikely(lcore_index == MLX5_LIST_NLCORE))
                rte_spinlock_unlock(&list->l_const.lcore_lock);
        return entry;
}
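
/*
 * Core unregistration path. Only the lcore that owns a local entry removes
 * it directly; releases from other lcores just bump the owner's inv_cnt so
 * the entry is reclaimed on that lcore's next registration. A shared global
 * entry is removed once its own reference count drops to zero.
 */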

static inline int
_mlx5_list_unregister(struct mlx5_list_inconst *l_inconst,
                      struct mlx5_list_const *l_const,
                      struct mlx5_list_entry *entry,
                      int lcore_idx)
{
        struct mlx5_list_entry *gentry = entry->gentry;

        if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
                return 1;
        if (entry->lcore_idx == (uint32_t)lcore_idx) {
                LIST_REMOVE(entry, next);
                if (l_const->lcores_share)
                        l_const->cb_clone_free(l_const->ctx, entry);
                else
                        l_const->cb_remove(l_const->ctx, entry);
        } else {
                __atomic_add_fetch(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
                                   1, __ATOMIC_RELAXED);
        }
        if (!l_const->lcores_share) {
                __atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
                DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
                        l_const->name, (void *)entry);
                return 0;
        }
        if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
                return 1;
        rte_rwlock_write_lock(&l_inconst->lock);
        if (likely(gentry->ref_cnt == 0)) {
                LIST_REMOVE(gentry, next);
                rte_rwlock_write_unlock(&l_inconst->lock);
                l_const->cb_remove(l_const->ctx, gentry);
                __atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
                DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
                        l_const->name, (void *)gentry);
                return 0;
        }
        rte_rwlock_write_unlock(&l_inconst->lock);
        return 1;
}

int
mlx5_list_unregister(struct mlx5_list *list,
                     struct mlx5_list_entry *entry)
{
        int ret;
        int lcore_index = rte_lcore_index(rte_lcore_id());

        if (unlikely(lcore_index == -1)) {
                lcore_index = MLX5_LIST_NLCORE;
                rte_spinlock_lock(&list->l_const.lcore_lock);
        }
        ret = _mlx5_list_unregister(&list->l_inconst, &list->l_const, entry,
                                    lcore_index);
        if (unlikely(lcore_index == MLX5_LIST_NLCORE))
                rte_spinlock_unlock(&list->l_const.lcore_lock);
        return ret;
}
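
/*
 * Drop every entry still cached in the list: global entries go through
 * cb_remove(), per-lcore clones through cb_clone_free(), and the per-lcore
 * cache memory itself is freed. No locking is done here, so the caller must
 * ensure the list is no longer in use.
 */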

static void
mlx5_list_uninit(struct mlx5_list_inconst *l_inconst,
                 struct mlx5_list_const *l_const)
{
        struct mlx5_list_entry *entry;
        int i;

        MLX5_ASSERT(l_inconst);
        for (i = 0; i < MLX5_LIST_MAX; i++) {
                if (!l_inconst->cache[i])
                        continue;
                while (!LIST_EMPTY(&l_inconst->cache[i]->h)) {
                        entry = LIST_FIRST(&l_inconst->cache[i]->h);
                        LIST_REMOVE(entry, next);
                        if (i == MLX5_LIST_GLOBAL) {
                                l_const->cb_remove(l_const->ctx, entry);
                                DRV_LOG(DEBUG, "mlx5 list %s entry %p "
                                        "destroyed.", l_const->name,
                                        (void *)entry);
                        } else {
                                l_const->cb_clone_free(l_const->ctx, entry);
                        }
                }
                if (i != MLX5_LIST_GLOBAL)
                        mlx5_free(l_inconst->cache[i]);
        }
}

void
mlx5_list_destroy(struct mlx5_list *list)
{
        mlx5_list_uninit(&list->l_inconst, &list->l_const);
        mlx5_free(list);
}

uint32_t
mlx5_list_get_entry_num(struct mlx5_list *list)
{
        MLX5_ASSERT(list);
        return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
}

/********************* Hash List **********************/

struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,
                  bool lcores_share, void *ctx, mlx5_list_create_cb cb_create,
                  mlx5_list_match_cb cb_match,
                  mlx5_list_remove_cb cb_remove,
                  mlx5_list_clone_cb cb_clone,
                  mlx5_list_clone_free_cb cb_clone_free)
{
        struct mlx5_hlist *h;
        struct mlx5_list_cache *gc;
        uint32_t act_size;
        uint32_t alloc_size;
        uint32_t i;

        if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
            !cb_clone_free) {
                rte_errno = EINVAL;
                return NULL;
        }
        /* Align to the next power of 2; a 32-bit integer is enough for now. */
        if (!rte_is_power_of_2(size)) {
                act_size = rte_align32pow2(size);
                DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will "
                        "be aligned to 0x%" PRIX32 ".", size, act_size);
        } else {
                act_size = size;
        }
        alloc_size = sizeof(struct mlx5_hlist) +
                     sizeof(struct mlx5_hlist_bucket) * act_size;
        if (lcores_share)
                alloc_size += sizeof(struct mlx5_list_cache) * act_size;
        /* Using zmalloc, then no need to initialize the heads. */
        h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
                        SOCKET_ID_ANY);
        if (!h) {
                DRV_LOG(ERR, "No memory for hash list %s creation",
                        name ? name : "None");
                return NULL;
        }
        if (name)
                snprintf(h->l_const.name, sizeof(h->l_const.name), "%s", name);
        h->l_const.ctx = ctx;
        h->l_const.lcores_share = lcores_share;
        h->l_const.cb_create = cb_create;
        h->l_const.cb_match = cb_match;
        h->l_const.cb_remove = cb_remove;
        h->l_const.cb_clone = cb_clone;
        h->l_const.cb_clone_free = cb_clone_free;
        rte_spinlock_init(&h->l_const.lcore_lock);
        h->mask = act_size - 1;
        h->direct_key = direct_key;
        gc = (struct mlx5_list_cache *)&h->buckets[act_size];
        for (i = 0; i < act_size; i++) {
                if (mlx5_list_init(&h->buckets[i].l, &h->l_const,
                                   lcores_share ? &gc[i] : NULL) != 0) {
                        mlx5_free(h);
                        return NULL;
                }
        }
        DRV_LOG(DEBUG, "Hash list %s with size 0x%" PRIX32 " was created.",
                name, act_size);
        return h;
}
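
/*
 * Bucket selection: with direct_key the low bits of the key index the bucket
 * directly, otherwise the key is hashed with CRC first. Each bucket is an
 * independent mlx5 list sharing the callbacks stored in l_const.
 */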

struct mlx5_list_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
        uint32_t idx;

        if (h->direct_key)
                idx = (uint32_t)(key & h->mask);
        else
                idx = rte_hash_crc_8byte(key, 0) & h->mask;
        return _mlx5_list_lookup(&h->buckets[idx].l, &h->l_const, ctx);
}
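
/*
 * The bucket index is saved in the entry (in the global entry when caches
 * are shared) so that mlx5_hlist_unregister() can find the owning bucket
 * without being given the key again.
 */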

struct mlx5_list_entry *
mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
        uint32_t idx;
        struct mlx5_list_entry *entry;
        int lcore_index = rte_lcore_index(rte_lcore_id());

        if (h->direct_key)
                idx = (uint32_t)(key & h->mask);
        else
                idx = rte_hash_crc_8byte(key, 0) & h->mask;
        if (unlikely(lcore_index == -1)) {
                lcore_index = MLX5_LIST_NLCORE;
                rte_spinlock_lock(&h->l_const.lcore_lock);
        }
        entry = _mlx5_list_register(&h->buckets[idx].l, &h->l_const, ctx,
                                    lcore_index);
        if (likely(entry)) {
                if (h->l_const.lcores_share)
                        entry->gentry->bucket_idx = idx;
                else
                        entry->bucket_idx = idx;
        }
        if (unlikely(lcore_index == MLX5_LIST_NLCORE))
                rte_spinlock_unlock(&h->l_const.lcore_lock);
        return entry;
}

int
mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_list_entry *entry)
{
        int lcore_index = rte_lcore_index(rte_lcore_id());
        int ret;
        uint32_t idx = h->l_const.lcores_share ? entry->gentry->bucket_idx :
                       entry->bucket_idx;

        if (unlikely(lcore_index == -1)) {
                lcore_index = MLX5_LIST_NLCORE;
                rte_spinlock_lock(&h->l_const.lcore_lock);
        }
        ret = _mlx5_list_unregister(&h->buckets[idx].l, &h->l_const, entry,
                                    lcore_index);
        if (unlikely(lcore_index == MLX5_LIST_NLCORE))
                rte_spinlock_unlock(&h->l_const.lcore_lock);
        return ret;
}

void
mlx5_hlist_destroy(struct mlx5_hlist *h)
{
        uint32_t i;

        for (i = 0; i <= h->mask; i++)
                mlx5_list_uninit(&h->buckets[i].l, &h->l_const);
        mlx5_free(h);
}
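
/*
 * Illustrative usage sketch (not part of the driver): a hash list behaves
 * like an array of mlx5 lists indexed by a 64-bit key. The names below are
 * hypothetical; the callbacks follow the same contract as for mlx5_list.
 *
 *     struct mlx5_hlist *h = mlx5_hlist_create("example", 64, false, true,
 *                                              NULL, my_create, my_match,
 *                                              my_remove, my_clone,
 *                                              my_clone_free);
 *     struct mlx5_list_entry *e;
 *     uint64_t key = 42;
 *
 *     e = mlx5_hlist_register(h, key, &key); // create or reuse entry for key
 *     ...
 *     mlx5_hlist_unregister(h, e);           // drop the reference
 *     mlx5_hlist_destroy(h);
 */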