/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_hash_crc.h>
#include <rte_errno.h>

#include <mlx5_malloc.h>

#include "mlx5_common_utils.h"
#include "mlx5_common_log.h"

/********************* mlx5 list ************************/

static int
mlx5_list_init(struct mlx5_list_inconst *l_inconst,
	       struct mlx5_list_const *l_const,
	       struct mlx5_list_cache *gc)
{
	rte_rwlock_init(&l_inconst->lock);
	if (l_const->lcores_share) {
		l_inconst->cache[RTE_MAX_LCORE] = gc;
		LIST_INIT(&l_inconst->cache[RTE_MAX_LCORE]->h);
	}
	DRV_LOG(DEBUG, "mlx5 list %s initialized.", l_const->name);
	return 0;
}

struct mlx5_list *
mlx5_list_create(const char *name, void *ctx, bool lcores_share,
		 mlx5_list_create_cb cb_create,
		 mlx5_list_match_cb cb_match,
		 mlx5_list_remove_cb cb_remove,
		 mlx5_list_clone_cb cb_clone,
		 mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_list *list;
	struct mlx5_list_cache *gc = NULL;

	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
	    !cb_clone_free) {
		rte_errno = EINVAL;
		return NULL;
	}
	list = mlx5_malloc(MLX5_MEM_ZERO,
			   sizeof(*list) + (lcores_share ? sizeof(*gc) : 0),
			   0, SOCKET_ID_ANY);
	if (!list)
		return NULL;
	if (name)
		snprintf(list->l_const.name,
			 sizeof(list->l_const.name), "%s", name);
	list->l_const.ctx = ctx;
	list->l_const.lcores_share = lcores_share;
	list->l_const.cb_create = cb_create;
	list->l_const.cb_match = cb_match;
	list->l_const.cb_remove = cb_remove;
	list->l_const.cb_clone = cb_clone;
	list->l_const.cb_clone_free = cb_clone_free;
	if (lcores_share)
		gc = (struct mlx5_list_cache *)(list + 1);
	if (mlx5_list_init(&list->l_inconst, &list->l_const, gc) != 0) {
		mlx5_free(list);
		return NULL;
	}
	return list;
}
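
/*
 * Usage sketch for the mlx5 list API implemented below. Illustrative only:
 * the callback implementations (my_create, my_match, my_remove, my_clone,
 * my_clone_free) and the contexts are hypothetical placeholders supplied by
 * the caller.
 *
 *	struct mlx5_list *lst;
 *	struct mlx5_list_entry *e;
 *
 *	lst = mlx5_list_create("example", user_ctx, true,
 *			       my_create, my_match, my_remove,
 *			       my_clone, my_clone_free);
 *	if (!lst)
 *		return -rte_errno;
 *	e = mlx5_list_register(lst, lookup_ctx);	// create or reuse
 *	if (e)
 *		mlx5_list_unregister(lst, e);		// drop the reference
 *	mlx5_list_destroy(lst);
 */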

static struct mlx5_list_entry *
__list_lookup(struct mlx5_list_inconst *l_inconst,
	      struct mlx5_list_const *l_const,
	      int lcore_index, void *ctx, bool reuse)
{
	struct mlx5_list_entry *entry =
				LIST_FIRST(&l_inconst->cache[lcore_index]->h);
	uint32_t ret = 0; /* Zero-init: not every branch below assigns it. */

	while (entry != NULL) {
		if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
			if (reuse) {
				ret = __atomic_add_fetch(&entry->ref_cnt, 1,
							 __ATOMIC_RELAXED) - 1;
				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
					l_const->name, (void *)entry,
					entry->ref_cnt);
			} else if (lcore_index < RTE_MAX_LCORE) {
				ret = __atomic_load_n(&entry->ref_cnt,
						      __ATOMIC_RELAXED);
			}
			if (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))
				return entry;
			if (reuse && ret == 0)
				entry->ref_cnt--; /* Invalid entry. */
		}
		entry = LIST_NEXT(entry, next);
	}
	return NULL;
}

static inline struct mlx5_list_entry *
_mlx5_list_lookup(struct mlx5_list_inconst *l_inconst,
		  struct mlx5_list_const *l_const, void *ctx)
{
	struct mlx5_list_entry *entry = NULL;
	int i;

	rte_rwlock_read_lock(&l_inconst->lock);
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!l_inconst->cache[i])
			continue;
		entry = __list_lookup(l_inconst, l_const, i, ctx, false);
		if (entry)
			break;
	}
	rte_rwlock_read_unlock(&l_inconst->lock);
	return entry;
}

struct mlx5_list_entry *
mlx5_list_lookup(struct mlx5_list *list, void *ctx)
{
	return _mlx5_list_lookup(&list->l_inconst, &list->l_const, ctx);
}

static struct mlx5_list_entry *
mlx5_list_cache_insert(struct mlx5_list_inconst *l_inconst,
		       struct mlx5_list_const *l_const, int lcore_index,
		       struct mlx5_list_entry *gentry, void *ctx)
{
	struct mlx5_list_entry *lentry =
			l_const->cb_clone(l_const->ctx, gentry, ctx);

	if (unlikely(!lentry))
		return NULL;
	lentry->ref_cnt = 1u;
	lentry->gentry = gentry;
	lentry->lcore_idx = (uint32_t)lcore_index;
	LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, lentry, next);
	return lentry;
}
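
/*
 * Reclaim entries that other lcores have unregistered. An lcore cannot touch
 * another lcore's cache directly, so _mlx5_list_unregister() only bumps the
 * owner cache's inv_cnt; the owning lcore later walks its own cache here and
 * frees the entries whose reference count has dropped to zero.
 */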
static void
__list_cache_clean(struct mlx5_list_inconst *l_inconst,
		   struct mlx5_list_const *l_const,
		   int lcore_index)
{
	struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
					       __ATOMIC_RELAXED);

	while (inv_cnt != 0 && entry != NULL) {
		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);

		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
			LIST_REMOVE(entry, next);
			if (l_const->lcores_share)
				l_const->cb_clone_free(l_const->ctx, entry);
			else
				l_const->cb_remove(l_const->ctx, entry);
			inv_cnt--;
		}
		entry = nentry;
	}
}
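
/*
 * Register (create or reuse) an entry. The caller must run on an EAL lcore:
 * when rte_lcore_index() returns -1 the call fails with rte_errno = ENOTSUP.
 * The local per-lcore cache is tried first, then (for shared lists) the
 * global list under the read lock, and only if both miss is a new entry
 * created and published under the write lock, with a generation check to
 * detect a concurrent creation of the same entry.
 */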
static inline struct mlx5_list_entry *
_mlx5_list_register(struct mlx5_list_inconst *l_inconst,
		    struct mlx5_list_const *l_const,
		    void *ctx)
{
	struct mlx5_list_entry *entry = NULL, *local_entry;
	volatile uint32_t prev_gen_cnt = 0;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	MLX5_ASSERT(l_inconst);
	MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
	if (unlikely(lcore_index == -1)) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	if (unlikely(!l_inconst->cache[lcore_index])) {
		l_inconst->cache[lcore_index] = mlx5_malloc(0,
					sizeof(struct mlx5_list_cache),
					RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!l_inconst->cache[lcore_index]) {
			rte_errno = ENOMEM;
			return NULL;
		}
		l_inconst->cache[lcore_index]->inv_cnt = 0;
		LIST_INIT(&l_inconst->cache[lcore_index]->h);
	}
	/* 0. Free entries that were invalidated by other lcores. */
	__list_cache_clean(l_inconst, l_const, lcore_index);
	/* 1. Lookup in local cache. */
	local_entry = __list_lookup(l_inconst, l_const, lcore_index, ctx, true);
	if (local_entry)
		return local_entry;
	if (l_const->lcores_share) {
		/* 2. Lookup with read lock on global list, reuse if found. */
		rte_rwlock_read_lock(&l_inconst->lock);
		entry = __list_lookup(l_inconst, l_const, RTE_MAX_LCORE,
				      ctx, true);
		if (likely(entry)) {
			rte_rwlock_read_unlock(&l_inconst->lock);
			return mlx5_list_cache_insert(l_inconst, l_const,
						      lcore_index,
						      entry, ctx);
		}
		prev_gen_cnt = l_inconst->gen_cnt;
		rte_rwlock_read_unlock(&l_inconst->lock);
	}
	/* 3. Prepare new entry for global list and for cache. */
	entry = l_const->cb_create(l_const->ctx, ctx);
	if (unlikely(!entry))
		return NULL;
	entry->ref_cnt = 1u;
	if (!l_const->lcores_share) {
		entry->lcore_idx = (uint32_t)lcore_index;
		LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
				 entry, next);
		__atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
			l_const->name, lcore_index,
			(void *)entry, entry->ref_cnt);
		return entry;
	}
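	/* Shared list: cache a local clone, publish the real entry globally. */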
	local_entry = l_const->cb_clone(l_const->ctx, entry, ctx);
	if (unlikely(!local_entry)) {
		l_const->cb_remove(l_const->ctx, entry);
		return NULL;
	}
	local_entry->ref_cnt = 1u;
	local_entry->gentry = entry;
	local_entry->lcore_idx = (uint32_t)lcore_index;
	rte_rwlock_write_lock(&l_inconst->lock);
	/* 4. Make sure the same entry was not created before the write lock. */
	if (unlikely(prev_gen_cnt != l_inconst->gen_cnt)) {
		struct mlx5_list_entry *oentry = __list_lookup(l_inconst,
							       l_const,
							       RTE_MAX_LCORE,
							       ctx, true);

		if (unlikely(oentry)) {
			/* 4.5. Found a real race, reuse the old entry. */
			rte_rwlock_write_unlock(&l_inconst->lock);
			l_const->cb_remove(l_const->ctx, entry);
			l_const->cb_clone_free(l_const->ctx, local_entry);
			return mlx5_list_cache_insert(l_inconst, l_const,
						      lcore_index,
						      oentry, ctx);
		}
	}
	/* 5. Update lists. */
	LIST_INSERT_HEAD(&l_inconst->cache[RTE_MAX_LCORE]->h, entry, next);
	l_inconst->gen_cnt++;
	rte_rwlock_write_unlock(&l_inconst->lock);
	LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
	__atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
		(void *)entry, entry->ref_cnt);
	return local_entry;
}

struct mlx5_list_entry *
mlx5_list_register(struct mlx5_list *list, void *ctx)
{
	return _mlx5_list_register(&list->l_inconst, &list->l_const, ctx);
}
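
/*
 * Drop one reference. Returns 1 when the entry is still referenced and kept,
 * 0 when it has been released (or queued for the owning lcore to reclaim in
 * __list_cache_clean()). For shared lists the global entry is removed under
 * the write lock once its own reference count reaches zero.
 */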
static inline int
_mlx5_list_unregister(struct mlx5_list_inconst *l_inconst,
		      struct mlx5_list_const *l_const,
		      struct mlx5_list_entry *entry)
{
	struct mlx5_list_entry *gentry = entry->gentry;
	int lcore_idx;

	if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	lcore_idx = rte_lcore_index(rte_lcore_id());
	MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
	if (entry->lcore_idx == (uint32_t)lcore_idx) {
		LIST_REMOVE(entry, next);
		if (l_const->lcores_share)
			l_const->cb_clone_free(l_const->ctx, entry);
		else
			l_const->cb_remove(l_const->ctx, entry);
	} else if (likely(lcore_idx != -1)) {
		__atomic_add_fetch(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
				   1, __ATOMIC_RELAXED);
	} else {
		return 0;
	}
	if (!l_const->lcores_share) {
		__atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			l_const->name, (void *)entry);
		return 0;
	}
	if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	rte_rwlock_write_lock(&l_inconst->lock);
	if (likely(gentry->ref_cnt == 0)) {
		LIST_REMOVE(gentry, next);
		rte_rwlock_write_unlock(&l_inconst->lock);
		l_const->cb_remove(l_const->ctx, gentry);
		__atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			l_const->name, (void *)gentry);
		return 0;
	}
	rte_rwlock_write_unlock(&l_inconst->lock);
	return 1;
}

int
mlx5_list_unregister(struct mlx5_list *list,
		     struct mlx5_list_entry *entry)
{
	return _mlx5_list_unregister(&list->l_inconst, &list->l_const, entry);
}
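
/*
 * Release everything still held by the list: global entries go through
 * cb_remove(), per-lcore clones through cb_clone_free(), and the per-lcore
 * cache memory itself is freed. The shared cache slot (RTE_MAX_LCORE) is not
 * freed here because it is allocated together with the list or hash list.
 */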
static void
mlx5_list_uninit(struct mlx5_list_inconst *l_inconst,
		 struct mlx5_list_const *l_const)
{
	struct mlx5_list_entry *entry;
	int i;

	MLX5_ASSERT(l_inconst);
	for (i = 0; i <= RTE_MAX_LCORE; i++) {
		if (!l_inconst->cache[i])
			continue;
		while (!LIST_EMPTY(&l_inconst->cache[i]->h)) {
			entry = LIST_FIRST(&l_inconst->cache[i]->h);
			LIST_REMOVE(entry, next);
			if (i == RTE_MAX_LCORE) {
				l_const->cb_remove(l_const->ctx, entry);
				DRV_LOG(DEBUG, "mlx5 list %s entry %p "
					"destroyed.", l_const->name,
					(void *)entry);
			} else {
				l_const->cb_clone_free(l_const->ctx, entry);
			}
		}
		if (i != RTE_MAX_LCORE)
			mlx5_free(l_inconst->cache[i]);
	}
}

void
mlx5_list_destroy(struct mlx5_list *list)
{
	mlx5_list_uninit(&list->l_inconst, &list->l_const);
	mlx5_free(list);
}

uint32_t
mlx5_list_get_entry_num(struct mlx5_list *list)
{
	MLX5_ASSERT(list);
	return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
}

/********************* Hash List **********************/
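
/*
 * The hash list is an array of buckets, each embedding an mlx5 list that
 * shares the callbacks and context stored in h->l_const. The bucket is
 * selected from the 64-bit key, either directly (direct_key) or via CRC.
 * The single allocation below holds the mlx5_hlist header, the bucket array
 * and, for shared lists, one global cache per bucket.
 */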
struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,
		  bool lcores_share, void *ctx, mlx5_list_create_cb cb_create,
		  mlx5_list_match_cb cb_match,
		  mlx5_list_remove_cb cb_remove,
		  mlx5_list_clone_cb cb_clone,
		  mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_hlist *h;
	struct mlx5_list_cache *gc;
	uint32_t act_size;
	uint32_t alloc_size;
	uint32_t i;

	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
	    !cb_clone_free) {
		rte_errno = EINVAL;
		return NULL;
	}
	/* Align to the next power of 2, a 32-bit integer is enough for now. */
	if (!rte_is_power_of_2(size)) {
		act_size = rte_align32pow2(size);
		DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will "
			"be aligned to 0x%" PRIX32 ".", size, act_size);
	} else {
		act_size = size;
	}
	alloc_size = sizeof(struct mlx5_hlist) +
		     sizeof(struct mlx5_hlist_bucket) * act_size;
	if (lcores_share)
		alloc_size += sizeof(struct mlx5_list_cache) * act_size;
	/* Using zmalloc, then no need to initialize the heads. */
	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
			SOCKET_ID_ANY);
	if (!h) {
		DRV_LOG(ERR, "No memory for hash list %s creation",
			name ? name : "None");
		return NULL;
	}
	if (name)
		snprintf(h->l_const.name, sizeof(h->l_const.name), "%s", name);
	h->l_const.ctx = ctx;
	h->l_const.lcores_share = lcores_share;
	h->l_const.cb_create = cb_create;
	h->l_const.cb_match = cb_match;
	h->l_const.cb_remove = cb_remove;
	h->l_const.cb_clone = cb_clone;
	h->l_const.cb_clone_free = cb_clone_free;
	h->mask = act_size - 1;
	h->direct_key = direct_key;
	gc = (struct mlx5_list_cache *)&h->buckets[act_size];
	for (i = 0; i < act_size; i++) {
		if (mlx5_list_init(&h->buckets[i].l, &h->l_const,
				   lcores_share ? &gc[i] : NULL) != 0) {
			mlx5_free(h);
			return NULL;
		}
	}
	DRV_LOG(DEBUG, "Hash list %s with size 0x%" PRIX32 " was created.",
		name, act_size);
	return h;
}
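
/*
 * Usage sketch for the hash list wrappers below. Illustrative only: the
 * callbacks (my_create, my_match, my_remove, my_clone, my_clone_free), the
 * contexts and the key are hypothetical.
 *
 *	struct mlx5_hlist *h;
 *	struct mlx5_list_entry *e;
 *
 *	h = mlx5_hlist_create("example", 64, false, true, user_ctx,
 *			      my_create, my_match, my_remove,
 *			      my_clone, my_clone_free);
 *	if (!h)
 *		return -rte_errno;
 *	e = mlx5_hlist_register(h, key, lookup_ctx);
 *	if (e)
 *		mlx5_hlist_unregister(h, e);
 *	mlx5_hlist_destroy(h);
 */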

struct mlx5_list_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;

	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	return _mlx5_list_lookup(&h->buckets[idx].l, &h->l_const, ctx);
}

struct mlx5_list_entry *
mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;
	struct mlx5_list_entry *entry;

	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	entry = _mlx5_list_register(&h->buckets[idx].l, &h->l_const, ctx);
	if (likely(entry)) {
		if (h->l_const.lcores_share)
			entry->gentry->bucket_idx = idx;
		else
			entry->bucket_idx = idx;
	}
	return entry;
}

int
mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_list_entry *entry)
{
	uint32_t idx = h->l_const.lcores_share ? entry->gentry->bucket_idx :
						 entry->bucket_idx;

	return _mlx5_list_unregister(&h->buckets[idx].l, &h->l_const, entry);
}

void
mlx5_hlist_destroy(struct mlx5_hlist *h)
{
	uint32_t i;

	for (i = 0; i <= h->mask; i++)
		mlx5_list_uninit(&h->buckets[i].l, &h->l_const);
	mlx5_free(h);
}