/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>

#include <mlx5_malloc.h>

#include "mlx5_utils.h"


/********************* Cache list ************************/

static struct mlx5_cache_entry *
mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
			     struct mlx5_cache_entry *entry __rte_unused,
			     void *ctx __rte_unused)
{
	return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
}

static void
mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
			     struct mlx5_cache_entry *entry)
{
	mlx5_free(entry);
}

int
mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
		     uint32_t entry_size, void *ctx,
		     mlx5_cache_create_cb cb_create,
		     mlx5_cache_match_cb cb_match,
		     mlx5_cache_remove_cb cb_remove)
{
	MLX5_ASSERT(list);
	if (!cb_match || (!cb_create ^ !cb_remove))
		return -1;
	if (name)
		snprintf(list->name, sizeof(list->name), "%s", name);
	list->entry_sz = entry_size;
	list->ctx = ctx;
	list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
	list->cb_match = cb_match;
	list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
	rte_rwlock_init(&list->lock);
	DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
	LIST_INIT(&list->head);
	return 0;
}
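/*
 * A minimal usage sketch of the cache list API (illustrative only; the
 * "my_*" names below are hypothetical and not part of this file):
 *
 *	struct my_entry { struct mlx5_cache_entry e; uint32_t key; };
 *	struct mlx5_cache_list list;
 *
 *	mlx5_cache_list_init(&list, "my_list", sizeof(struct my_entry),
 *			     ctx, NULL, my_match_cb, NULL);
 *	entry = mlx5_cache_register(&list, &key);   (ref++, or create new)
 *	...
 *	mlx5_cache_unregister(&list, entry);        (ref--, removed at zero)
 *	mlx5_cache_list_destroy(&list);
 *
 * Passing NULL for both the create and remove callbacks selects the
 * default zeroed-malloc/free pair above; the callbacks must be given
 * either both or not at all, and a match callback is always required.
 */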
static struct mlx5_cache_entry *
__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
{
	struct mlx5_cache_entry *entry;

	LIST_FOREACH(entry, &list->head, next) {
		if (list->cb_match(list, entry, ctx))
			continue;
		if (reuse) {
			__atomic_add_fetch(&entry->ref_cnt, 1,
					   __ATOMIC_RELAXED);
			DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
				list->name, (void *)entry, entry->ref_cnt);
		}
		break;
	}
	return entry;
}

static struct mlx5_cache_entry *
cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
{
	struct mlx5_cache_entry *entry;

	rte_rwlock_read_lock(&list->lock);
	entry = __cache_lookup(list, ctx, reuse);
	rte_rwlock_read_unlock(&list->lock);
	return entry;
}

struct mlx5_cache_entry *
mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
{
	return cache_lookup(list, ctx, false);
}

struct mlx5_cache_entry *
mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
{
	struct mlx5_cache_entry *entry;
	uint32_t prev_gen_cnt = 0;

	MLX5_ASSERT(list);
	prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
	/* Lookup with read lock, reuse if found. */
	entry = cache_lookup(list, ctx, true);
	if (entry)
		return entry;
	/* Not found, append with write lock - block read from other threads. */
	rte_rwlock_write_lock(&list->lock);
	/* If list changed by other threads before lock, search again. */
	if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
		/* Lookup and reuse w/o read lock. */
		entry = __cache_lookup(list, ctx, true);
		if (entry)
			goto done;
	}
	entry = list->cb_create(list, entry, ctx);
	if (!entry) {
		DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
			list->name, (void *)entry);
		goto done;
	}
	entry->ref_cnt = 1;
	LIST_INSERT_HEAD(&list->head, entry, next);
	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
	__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
	DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
		list->name, (void *)entry, entry->ref_cnt);
done:
	rte_rwlock_write_unlock(&list->lock);
	return entry;
}
int
mlx5_cache_unregister(struct mlx5_cache_list *list,
		      struct mlx5_cache_entry *entry)
{
	rte_rwlock_write_lock(&list->lock);
	MLX5_ASSERT(entry && entry->next.le_prev);
	DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
		list->name, (void *)entry, entry->ref_cnt);
	if (--entry->ref_cnt) {
		rte_rwlock_write_unlock(&list->lock);
		return 1;
	}
	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
	__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
	LIST_REMOVE(entry, next);
	list->cb_remove(list, entry);
	rte_rwlock_write_unlock(&list->lock);
	DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
		list->name, (void *)entry);
	return 0;
}

void
mlx5_cache_list_destroy(struct mlx5_cache_list *list)
{
	struct mlx5_cache_entry *entry;

	MLX5_ASSERT(list);
	/* No LIST_FOREACH_SAFE available, use a while loop instead. */
	while (!LIST_EMPTY(&list->head)) {
		entry = LIST_FIRST(&list->head);
		LIST_REMOVE(entry, next);
		list->cb_remove(list, entry);
		DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
			list->name, (void *)entry);
	}
	memset(list, 0, sizeof(*list));
}

uint32_t
mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
{
	MLX5_ASSERT(list);
	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
}

/********************* Indexed pool **********************/

static inline void
mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_lock(&pool->rsz_lock);
}

static inline void
mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_unlock(&pool->rsz_lock);
}

static inline uint32_t
mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t trunk_idx = 0;
	uint32_t i;

	if (!cfg->grow_trunk)
		return entry_idx / cfg->trunk_size;
	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
			    (cfg->trunk_size << (cfg->grow_shift *
			    cfg->grow_trunk)) + cfg->grow_trunk;
	} else {
		for (i = 0; i < cfg->grow_trunk; i++) {
			if (entry_idx < pool->grow_tbl[i])
				break;
		}
		trunk_idx = i;
	}
	return trunk_idx;
}

static inline uint32_t
mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;

	return cfg->trunk_size << (cfg->grow_shift *
	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
}

static inline uint32_t
mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t offset = 0;

	if (!trunk_idx)
		return 0;
	if (!cfg->grow_trunk)
		return cfg->trunk_size * trunk_idx;
	if (trunk_idx < cfg->grow_trunk)
		offset = pool->grow_tbl[trunk_idx - 1];
	else
		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
			 (cfg->trunk_size << (cfg->grow_shift *
			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
	return offset;
}
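/*
 * Worked example of the trunk geometry (illustrative numbers only):
 * with trunk_size = 16, grow_shift = 1 and grow_trunk = 3, the trunk
 * sizes double for the first grow_trunk trunks and then stay fixed:
 *
 *	trunk 0:  16 entries, offset   0   (grow_tbl[0] =  16)
 *	trunk 1:  32 entries, offset  16   (grow_tbl[1] =  48)
 *	trunk 2:  64 entries, offset  48   (grow_tbl[2] = 112)
 *	trunk 3+: 16 << (1 * 3) = 128 entries each, offset 112 + 128 * n
 *
 * mlx5_trunk_idx_get() inverts this mapping: entry index 100 is below
 * grow_tbl[2] = 112 and not below grow_tbl[1] = 48, so it lives in
 * trunk 2 at entry offset 100 - 48 = 52.
 */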
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
{
	struct mlx5_indexed_pool *pool;
	uint32_t i;

	if (!cfg || (!cfg->malloc ^ !cfg->free) ||
	    (cfg->per_core_cache && cfg->release_mem_en) ||
	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
		return NULL;
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
			   SOCKET_ID_ANY);
	if (!pool)
		return NULL;
	pool->cfg = *cfg;
	if (!pool->cfg.trunk_size)
		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
	if (!cfg->malloc && !cfg->free) {
		pool->cfg.malloc = mlx5_malloc;
		pool->cfg.free = mlx5_free;
	}
	if (pool->cfg.need_lock)
		rte_spinlock_init(&pool->rsz_lock);
	/*
	 * Initialize the dynamic grow trunk size lookup table to have a
	 * quick lookup for the trunk entry index offset.
	 */
	for (i = 0; i < cfg->grow_trunk; i++) {
		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
		if (i > 0)
			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
	}
	if (!pool->cfg.max_idx)
		pool->cfg.max_idx =
			mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1);
	if (!cfg->per_core_cache)
		pool->free_list = TRUNK_INVALID;
	rte_spinlock_init(&pool->lcore_lock);
	return pool;
}
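/*
 * A minimal configuration sketch (illustrative values only): create a
 * locked pool of 64-byte entries using the doubling geometry from the
 * example above, then release it.
 *
 *	struct mlx5_indexed_pool_config cfg = {
 *		.size = 64,		(entry size in bytes)
 *		.trunk_size = 16,	(must be a power of two)
 *		.grow_trunk = 3,
 *		.grow_shift = 1,
 *		.need_lock = 1,
 *		.type = "example_pool",
 *	};
 *	struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);
 *	...
 *	mlx5_ipool_destroy(pool);
 *
 * Note the validation above: per_core_cache and release_mem_en are
 * mutually exclusive, and the malloc/free callbacks must be supplied
 * either both or not at all.
 */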
static int
mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_trunk **trunk_tmp;
	struct mlx5_indexed_trunk **p;
	size_t trunk_size = 0;
	size_t data_size;
	size_t bmp_size;
	uint32_t idx, cur_max_idx, i;

	cur_max_idx = mlx5_trunk_idx_offset_get(pool, pool->n_trunk_valid);
	if (pool->n_trunk_valid == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return -ENOMEM;
	if (pool->n_trunk_valid == pool->n_trunk) {
		/* No free trunk flags, expand trunk list. */
		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);

		p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
				     sizeof(struct mlx5_indexed_trunk *),
				     RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return -ENOMEM;
		if (pool->trunks)
			memcpy(p, pool->trunks, pool->n_trunk_valid *
			       sizeof(struct mlx5_indexed_trunk *));
		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
		trunk_tmp = pool->trunks;
		pool->trunks = p;
		if (trunk_tmp)
			pool->cfg.free(trunk_tmp);
		pool->n_trunk += n_grow;
	}
	if (!pool->cfg.release_mem_en) {
		idx = pool->n_trunk_valid;
	} else {
		/* Find the first available slot in the trunk list. */
		for (idx = 0; idx < pool->n_trunk; idx++)
			if (pool->trunks[idx] == NULL)
				break;
	}
	trunk_size += sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, idx);
	bmp_size = rte_bitmap_get_memory_footprint(data_size);
	/* rte_bitmap requires the memory to be cache-line aligned. */
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk_size += bmp_size;
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!trunk)
		return -ENOMEM;
	pool->trunks[idx] = trunk;
	trunk->idx = idx;
	trunk->free = data_size;
	trunk->prev = TRUNK_INVALID;
	trunk->next = TRUNK_INVALID;
	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
	pool->free_list = idx;
	/* Mark all entries as available. */
	trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
		     [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
		     bmp_size);
	/* Clear the overhead bits in the trunk, if any. */
	if (cur_max_idx + data_size > pool->cfg.max_idx) {
		for (i = pool->cfg.max_idx - cur_max_idx; i < data_size; i++)
			rte_bitmap_clear(trunk->bmp, i);
	}
	MLX5_ASSERT(trunk->bmp);
	pool->n_trunk_valid++;
#ifdef POOL_DEBUG
	pool->trunk_new++;
	pool->trunk_avail++;
#endif
	return 0;
}
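/*
 * Trunk memory layout as allocated above (descriptive sketch matching
 * the offsets used in this function):
 *
 *	+---------------------------+ <- trunk (header: idx/free/prev/next)
 *	| struct mlx5_indexed_trunk |
 *	+---------------------------+ <- trunk->data
 *	| data_size * cfg.size      |    entry storage, cache-line rounded
 *	+---------------------------+ <- trunk->data + ROUNDUP(...)
 *	| rte_bitmap (bmp_size)     |    one bit per entry, set = free
 *	+---------------------------+
 */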
static inline struct mlx5_indexed_cache *
mlx5_ipool_update_global_cache(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_cache *gc, *lc, *olc = NULL;

	lc = pool->cache[cidx]->lc;
	gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
	if (gc && lc != gc) {
		mlx5_ipool_lock(pool);
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = pool->gc;
		lc->ref_cnt++;
		pool->cache[cidx]->lc = lc;
		mlx5_ipool_unlock(pool);
		if (olc)
			pool->cfg.free(olc);
	}
	return lc;
}
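/*
 * Reference scheme used by the per-core caches: pool->gc points to the
 * current global mlx5_indexed_cache, and each per-lcore cache keeps a
 * ref-counted pointer (lc) to the snapshot it last saw. When the trunk
 * array is resized, a new global cache replaces pool->gc; stale
 * snapshots linger until the last lcore drops its reference, which is
 * what the olc ("old local cache") bookkeeping above and below
 * implements.
 */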
static uint32_t
mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *p, *lc, *olc = NULL;
	size_t trunk_size = 0;
	size_t data_size;
	uint32_t cur_max_idx, trunk_idx, trunk_n;
	uint32_t fetch_size, ts_idx, i;
	int n_grow;

check_again:
	p = NULL;
	fetch_size = 0;
	/*
	 * Fetch new indices from the global cache if possible. On the
	 * first round the local cache will be NULL.
	 */
	lc = pool->cache[cidx]->lc;
	mlx5_ipool_lock(pool);
	/* Try to update the local cache first. */
	if (likely(pool->gc)) {
		if (lc != pool->gc) {
			if (lc && !(--lc->ref_cnt))
				olc = lc;
			lc = pool->gc;
			lc->ref_cnt++;
			pool->cache[cidx]->lc = lc;
		}
		if (lc->len) {
			/* Use the updated local cache to fetch indices. */
			fetch_size = pool->cfg.per_core_cache >> 2;
			if (lc->len < fetch_size)
				fetch_size = lc->len;
			lc->len -= fetch_size;
			memcpy(pool->cache[cidx]->idx, &lc->idx[lc->len],
			       sizeof(uint32_t) * fetch_size);
		}
	}
	mlx5_ipool_unlock(pool);
	if (unlikely(olc)) {
		pool->cfg.free(olc);
		olc = NULL;
	}
	if (fetch_size) {
		pool->cache[cidx]->len = fetch_size - 1;
		return pool->cache[cidx]->idx[pool->cache[cidx]->len];
	}
	trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
			 __ATOMIC_ACQUIRE) : 0;
	trunk_n = lc ? lc->n_trunk : 0;
	cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
	/* Check whether the index reached the maximum. */
	if (trunk_idx == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return 0;
	/* Not enough space in the trunk array, resize it. */
	if (trunk_idx == trunk_n) {
		n_grow = trunk_idx ? trunk_idx :
			 RTE_CACHE_LINE_SIZE / sizeof(void *);
		cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_n + n_grow);
		/* Resize the trunk array. */
		p = pool->cfg.malloc(0, ((trunk_idx + n_grow) *
			sizeof(struct mlx5_indexed_trunk *)) +
			(cur_max_idx * sizeof(uint32_t)) + sizeof(*p),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return 0;
		p->trunks = (struct mlx5_indexed_trunk **)&p->idx[cur_max_idx];
		if (lc)
			memcpy(p->trunks, lc->trunks, trunk_idx *
			       sizeof(struct mlx5_indexed_trunk *));
#ifdef RTE_LIBRTE_MLX5_DEBUG
		memset(RTE_PTR_ADD(p->trunks, trunk_idx * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
#endif
		p->n_trunk_valid = trunk_idx;
		p->n_trunk = trunk_n + n_grow;
		p->len = 0;
	}
	/* Prepare the new trunk. */
	trunk_size = sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, trunk_idx);
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (unlikely(!trunk)) {
		pool->cfg.free(p);
		return 0;
	}
	trunk->idx = trunk_idx;
	trunk->free = data_size;
	mlx5_ipool_lock(pool);
	/*
	 * Double check whether the trunk list was updated or indices
	 * became available. While the new trunk was being allocated,
	 * indices may still have been flushed to the global cache, so
	 * pool->gc->len needs to be checked as well.
	 */
	if (pool->gc && (lc != pool->gc ||
	    lc->n_trunk_valid != trunk_idx ||
	    pool->gc->len)) {
		mlx5_ipool_unlock(pool);
		if (p)
			pool->cfg.free(p);
		pool->cfg.free(trunk);
		goto check_again;
	}
	/* Resize the trunk array and update the local cache first. */
	if (p) {
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = p;
		lc->ref_cnt = 1;
		pool->cache[cidx]->lc = lc;
		__atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
	}
	/* Add the trunk to the trunk array. */
	lc->trunks[trunk_idx] = trunk;
	__atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
	/* Enqueue half of the indices to the global cache. */
	ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
	fetch_size = trunk->free >> 1;
	for (i = 0; i < fetch_size; i++)
		lc->idx[i] = ts_idx + i;
	lc->len = fetch_size;
	mlx5_ipool_unlock(pool);
	/* Copy the remaining half minus 1 to the local cache index array. */
	pool->cache[cidx]->len = trunk->free - fetch_size - 1;
	ts_idx += fetch_size;
	for (i = 0; i < pool->cache[cidx]->len; i++)
		pool->cache[cidx]->idx[i] = ts_idx + i;
	if (olc)
		pool->cfg.free(olc);
	return ts_idx + i;
}
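/*
 * Worked example of the index split above (illustrative numbers only):
 * for a fresh trunk with 128 free entries starting at 1-based index
 * ts_idx = 1, the first 64 indices (1..64) go to the global cache, the
 * next 63 (65..127) fill this core's local cache, and index 128 is
 * returned directly to the caller.
 */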
static void *
_mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *lc;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	MLX5_ASSERT(idx);
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
			return NULL;
		}
	}
	lc = mlx5_ipool_update_global_cache(pool, cidx);
	idx -= 1;
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	trunk = lc->trunks[trunk_idx];
	MLX5_ASSERT(trunk);
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx);
	return &trunk->data[entry_idx * pool->cfg.size];
}

static void *
mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	void *entry;
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		cidx = RTE_MAX_LCORE;
		rte_spinlock_lock(&pool->lcore_lock);
	}
	entry = _mlx5_ipool_get_cache(pool, cidx, idx);
	if (unlikely(cidx == RTE_MAX_LCORE))
		rte_spinlock_unlock(&pool->lcore_lock);
	return entry;
}
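/*
 * Non-EAL threads have no lcore index (rte_lcore_index() returns -1),
 * so the wrappers above and below fall back to a shared cache slot at
 * position RTE_MAX_LCORE, serialized by pool->lcore_lock. EAL worker
 * threads each use their own slot and take no lock on this path.
 */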
static void *
_mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, int cidx,
			 uint32_t *idx)
{
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
			return NULL;
		}
	} else if (pool->cache[cidx]->len) {
		pool->cache[cidx]->len--;
		*idx = pool->cache[cidx]->idx[pool->cache[cidx]->len];
		return _mlx5_ipool_get_cache(pool, cidx, *idx);
	}
	/* The local cache is empty, fetch more indices from the global one. */
	*idx = mlx5_ipool_allocate_from_global(pool, cidx);
	if (unlikely(!(*idx)))
		return NULL;
	return _mlx5_ipool_get_cache(pool, cidx, *idx);
}

static void *
mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry;
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		cidx = RTE_MAX_LCORE;
		rte_spinlock_lock(&pool->lcore_lock);
	}
	entry = _mlx5_ipool_malloc_cache(pool, cidx, idx);
	if (unlikely(cidx == RTE_MAX_LCORE))
		rte_spinlock_unlock(&pool->lcore_lock);
	return entry;
}

static void
_mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
{
	struct mlx5_ipool_per_lcore *ilc;
	struct mlx5_indexed_cache *gc, *olc = NULL;
	uint32_t reclaim_num = 0;

	MLX5_ASSERT(idx);
	/*
	 * The index may have been allocated on core A but freed on core B.
	 * In this case, check whether the local cache on core B has been
	 * allocated yet.
	 */
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
			return;
		}
	}
	/* Try to enqueue to the local index cache. */
	if (pool->cache[cidx]->len < pool->cfg.per_core_cache) {
		pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
		pool->cache[cidx]->len++;
		return;
	}
	ilc = pool->cache[cidx];
	reclaim_num = pool->cfg.per_core_cache >> 2;
	ilc->len -= reclaim_num;
	/* The local index cache is full, reclaim to the global index cache. */
	mlx5_ipool_lock(pool);
	gc = pool->gc;
	if (ilc->lc != gc) {
		if (!(--ilc->lc->ref_cnt))
			olc = ilc->lc;
		gc->ref_cnt++;
		ilc->lc = gc;
	}
	memcpy(&gc->idx[gc->len], &ilc->idx[ilc->len],
	       reclaim_num * sizeof(uint32_t));
	gc->len += reclaim_num;
	mlx5_ipool_unlock(pool);
	if (olc)
		pool->cfg.free(olc);
	pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
	pool->cache[cidx]->len++;
}

static void
mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		cidx = RTE_MAX_LCORE;
		rte_spinlock_lock(&pool->lcore_lock);
	}
	_mlx5_ipool_free_cache(pool, cidx, idx);
	if (unlikely(cidx == RTE_MAX_LCORE))
		rte_spinlock_unlock(&pool->lcore_lock);
}
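/*
 * Worked example of the cache watermarks above (illustrative numbers
 * only): with per_core_cache = 256, a local-cache miss refills up to
 * 256 >> 2 = 64 indices from the global cache, and a free into a full
 * local cache flushes 64 indices back to the global cache, so each
 * core keeps between 0 and 256 spare indices at any time.
 */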
void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint64_t slab = 0;
	uint32_t iidx = 0;
	void *p;

	if (pool->cfg.per_core_cache)
		return mlx5_ipool_malloc_cache(pool, idx);
	mlx5_ipool_lock(pool);
	if (pool->free_list == TRUNK_INVALID) {
		/* No available trunks, grow a new one. */
		if (mlx5_ipool_grow(pool)) {
			mlx5_ipool_unlock(pool);
			return NULL;
		}
	}
	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
	trunk = pool->trunks[pool->free_list];
	MLX5_ASSERT(trunk->free);
	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
		mlx5_ipool_unlock(pool);
		return NULL;
	}
	MLX5_ASSERT(slab);
	iidx += __builtin_ctzll(slab);
	MLX5_ASSERT(iidx != UINT32_MAX);
	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
	rte_bitmap_clear(trunk->bmp, iidx);
	p = &trunk->data[iidx * pool->cfg.size];
	/*
	 * The ipool index should grow continually from small to big;
	 * some features such as metering accept only a limited number
	 * of index bits, so a random index with the MSB set may be
	 * rejected.
	 */
	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
	iidx += 1; /* non-zero index. */
	trunk->free--;
#ifdef POOL_DEBUG
	pool->n_entry++;
#endif
	if (!trunk->free) {
		/* A full trunk is removed from the free trunk list. */
		MLX5_ASSERT(pool->free_list == trunk->idx);
		pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
		trunk->prev = TRUNK_INVALID;
		trunk->next = TRUNK_INVALID;
#ifdef POOL_DEBUG
		pool->trunk_empty++;
		pool->trunk_avail--;
#endif
	}
	*idx = iidx;
	mlx5_ipool_unlock(pool);
	return p;
}
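/*
 * Worked example of the bitmap scan above (illustrative numbers only):
 * rte_bitmap_scan() reports the base position of a 64-bit slab that
 * contains at least one set (free) bit, e.g. iidx = 64 with
 * slab = 0x28. The lowest set bit, __builtin_ctzll(0x28) = 3, selects
 * entry 64 + 3 = 67 inside the trunk; adding the trunk offset plus 1
 * then yields the user-visible non-zero pool index.
 */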
void *
mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry = mlx5_ipool_malloc(pool, idx);

	if (entry && pool->cfg.size)
		memset(entry, 0, pool->cfg.size);
	return entry;
}

void
mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return;
	if (pool->cfg.per_core_cache) {
		mlx5_ipool_free_cache(pool, idx);
		return;
	}
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	rte_bitmap_set(trunk->bmp, entry_idx);
	trunk->free++;
	if (pool->cfg.release_mem_en && trunk->free == mlx5_trunk_size_get
	   (pool, trunk->idx)) {
		if (pool->free_list == trunk->idx)
			pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = trunk->prev;
		if (trunk->prev != TRUNK_INVALID)
			pool->trunks[trunk->prev]->next = trunk->next;
		pool->cfg.free(trunk);
		pool->trunks[trunk_idx] = NULL;
		pool->n_trunk_valid--;
#ifdef POOL_DEBUG
		pool->trunk_avail--;
		pool->trunk_free++;
#endif
		if (pool->n_trunk_valid == 0) {
			pool->cfg.free(pool->trunks);
			pool->trunks = NULL;
			pool->n_trunk = 0;
		}
	} else if (trunk->free == 1) {
		/* Put into free trunk list head. */
		MLX5_ASSERT(pool->free_list != trunk->idx);
		trunk->next = pool->free_list;
		trunk->prev = TRUNK_INVALID;
		if (pool->free_list != TRUNK_INVALID)
			pool->trunks[pool->free_list]->prev = trunk->idx;
		pool->free_list = trunk->idx;
#ifdef POOL_DEBUG
		pool->trunk_empty--;
		pool->trunk_avail++;
#endif
	}
#ifdef POOL_DEBUG
	pool->n_entry--;
#endif
out:
	mlx5_ipool_unlock(pool);
}
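/*
 * A minimal allocation lifecycle sketch (illustrative only; "my_obj" is
 * hypothetical and "pool" is assumed to come from mlx5_ipool_create()):
 *
 *	uint32_t idx;
 *	struct my_obj *obj = mlx5_ipool_zmalloc(pool, &idx);
 *
 *	if (obj) {
 *		(idx is a non-zero handle, 0 means allocation failure)
 *		struct my_obj *same = mlx5_ipool_get(pool, idx);
 *		mlx5_ipool_free(pool, idx);
 *	}
 *
 * Entries are addressed by the 1-based index rather than by pointer,
 * so a handle survives trunk array resizes and fits in 32 bits.
 */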
void *
mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	void *p = NULL;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return NULL;
	if (pool->cfg.per_core_cache)
		return mlx5_ipool_get_cache(pool, idx);
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	p = &trunk->data[entry_idx * pool->cfg.size];
out:
	mlx5_ipool_unlock(pool);
	return p;
}
int
mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk **trunks = NULL;
	struct mlx5_indexed_cache *gc = pool->gc;
	uint32_t i, n_trunk_valid = 0;

	MLX5_ASSERT(pool);
	mlx5_ipool_lock(pool);
	if (pool->cfg.per_core_cache) {
		for (i = 0; i <= RTE_MAX_LCORE; i++) {
			/*
			 * Free only the old global caches here; the
			 * current pool->gc is freed last.
			 */
			if (pool->cache[i]) {
				if (pool->cache[i]->lc &&
				    pool->cache[i]->lc != pool->gc &&
				    (!(--pool->cache[i]->lc->ref_cnt)))
					pool->cfg.free(pool->cache[i]->lc);
				pool->cfg.free(pool->cache[i]);
			}
		}
		if (gc) {
			trunks = gc->trunks;
			n_trunk_valid = gc->n_trunk_valid;
		}
	} else {
		gc = NULL;
		trunks = pool->trunks;
		n_trunk_valid = pool->n_trunk_valid;
	}
	for (i = 0; i < n_trunk_valid; i++) {
		if (trunks[i])
			pool->cfg.free(trunks[i]);
	}
	if (!gc && trunks)
		pool->cfg.free(trunks);
	if (gc)
		pool->cfg.free(gc);
	mlx5_ipool_unlock(pool);
	mlx5_free(pool);
	return 0;
}
void
mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
{
	uint32_t i, j;
	struct mlx5_indexed_cache *gc;
	struct rte_bitmap *ibmp;
	uint32_t bmp_num, mem_size;

	if (!pool->cfg.per_core_cache)
		return;
	gc = pool->gc;
	if (!gc)
		return;
	/* Reset the bitmap. */
	bmp_num = mlx5_trunk_idx_offset_get(pool, gc->n_trunk_valid);
	mem_size = rte_bitmap_get_memory_footprint(bmp_num);
	pool->bmp_mem = pool->cfg.malloc(MLX5_MEM_ZERO, mem_size,
					 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!pool->bmp_mem) {
		DRV_LOG(ERR, "Ipool bitmap mem allocate failed.\n");
		return;
	}
	ibmp = rte_bitmap_init_with_all_set(bmp_num, pool->bmp_mem, mem_size);
	if (!ibmp) {
		pool->cfg.free(pool->bmp_mem);
		pool->bmp_mem = NULL;
		DRV_LOG(ERR, "Ipool bitmap create failed.\n");
		return;
	}
	pool->ibmp = ibmp;
	/* Clear the global cache. */
	for (i = 0; i < gc->len; i++)
		rte_bitmap_clear(ibmp, gc->idx[i] - 1);
	/* Clear the per-core caches. */
	for (i = 0; i < RTE_MAX_LCORE + 1; i++) {
		struct mlx5_ipool_per_lcore *ilc = pool->cache[i];

		if (!ilc)
			continue;
		for (j = 0; j < ilc->len; j++)
			rte_bitmap_clear(ibmp, ilc->idx[j] - 1);
	}
}

static void *
mlx5_ipool_get_next_cache(struct mlx5_indexed_pool *pool, uint32_t *pos)
{
	struct rte_bitmap *ibmp;
	uint64_t slab = 0;
	uint32_t iidx = *pos;

	ibmp = pool->ibmp;
	if (!ibmp || !rte_bitmap_scan(ibmp, &iidx, &slab)) {
		if (pool->bmp_mem) {
			pool->cfg.free(pool->bmp_mem);
			pool->bmp_mem = NULL;
			pool->ibmp = NULL;
		}
		return NULL;
	}
	iidx += __builtin_ctzll(slab);
	rte_bitmap_clear(ibmp, iidx);
	iidx++;
	*pos = iidx;
	return mlx5_ipool_get_cache(pool, iidx);
}

void *
mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos)
{
	uint32_t idx = *pos;
	void *entry;

	if (pool->cfg.per_core_cache)
		return mlx5_ipool_get_next_cache(pool, pos);
	while (idx <= mlx5_trunk_idx_offset_get(pool, pool->n_trunk)) {
		entry = mlx5_ipool_get(pool, idx);
		if (entry) {
			*pos = idx;
			return entry;
		}
		idx++;
	}
	return NULL;
}
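/*
 * A minimal iteration sketch (illustrative only): for per-core-cache
 * pools the cached free indices must be flushed back first so the walk
 * sees exactly the allocated entries.
 *
 *	uint32_t pos = 0;
 *	void *entry;
 *
 *	mlx5_ipool_flush_cache(pool);
 *	while ((entry = mlx5_ipool_get_next(pool, &pos)) != NULL) {
 *		(use the entry at index pos)
 *		pos++;
 *	}
 */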
void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{
	printf("Pool %s entry size %u, trunks %u, %d entry per trunk, "
	       "total: %d\n",
	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
	       pool->cfg.trunk_size, pool->n_trunk_valid);
#ifdef POOL_DEBUG
	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
	       "available %u free %u\n",
	       pool->cfg.type, pool->n_entry, pool->trunk_new,
	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
#endif
}

struct mlx5_l3t_tbl *
mlx5_l3t_create(enum mlx5_l3t_type type)
{
	struct mlx5_l3t_tbl *tbl;
	struct mlx5_indexed_pool_config l3t_ip_cfg = {
		.trunk_size = 16,
		.grow_trunk = 6,
		.grow_shift = 1,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
	};

	if (type >= MLX5_L3T_TYPE_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}
	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
			  SOCKET_ID_ANY);
	if (!tbl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tbl->type = type;
	switch (type) {
	case MLX5_L3T_TYPE_WORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_word);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_w";
		break;
	case MLX5_L3T_TYPE_DWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw";
		break;
	case MLX5_L3T_TYPE_QWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw";
		break;
	default:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr";
		break;
	}
	rte_spinlock_init(&tbl->sl);
	tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
	if (!tbl->eip) {
		rte_errno = ENOMEM;
		mlx5_free(tbl);
		tbl = NULL;
	}
	return tbl;
}
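/*
 * A minimal three-level table sketch (illustrative only; the public
 * set-entry wrapper is outside this excerpt and presumably declared
 * alongside the getters in mlx5_utils.h):
 *
 *	union mlx5_l3t_data data;
 *	struct mlx5_l3t_tbl *tbl = mlx5_l3t_create(MLX5_L3T_TYPE_QWORD);
 *
 *	(store a value for key idx with the set-entry API)
 *	if (!mlx5_l3t_get_entry(tbl, idx, &data) && data.qword)
 *		mlx5_l3t_clear_entry(tbl, idx);   (drop the reference)
 *	mlx5_l3t_destroy(tbl);
 *
 * Each successful get on a non-empty entry takes a reference that a
 * later clear releases; the last clear frees the entry table back to
 * the indexed pool.
 */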
void
mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	uint32_t i, j;

	if (!tbl)
		return;
	g_tbl = tbl->tbl;
	if (g_tbl) {
		for (i = 0; i < MLX5_L3T_GT_SIZE; i++) {
			m_tbl = g_tbl->tbl[i];
			if (!m_tbl)
				continue;
			for (j = 0; j < MLX5_L3T_MT_SIZE; j++) {
				if (!m_tbl->tbl[j])
					continue;
				MLX5_ASSERT(!((struct mlx5_l3t_entry_word *)
					    m_tbl->tbl[j])->ref_cnt);
				mlx5_ipool_free(tbl->eip,
						((struct mlx5_l3t_entry_word *)
						m_tbl->tbl[j])->idx);
				m_tbl->tbl[j] = 0;
				if (!(--m_tbl->ref_cnt))
					break;
			}
			MLX5_ASSERT(!m_tbl->ref_cnt);
			mlx5_free(g_tbl->tbl[i]);
			g_tbl->tbl[i] = 0;
			if (!(--g_tbl->ref_cnt))
				break;
		}
		MLX5_ASSERT(!g_tbl->ref_cnt);
		mlx5_free(tbl->tbl);
		tbl->tbl = 0;
	}
	mlx5_ipool_destroy(tbl->eip);
	mlx5_free(tbl);
}

static int32_t
__l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;

	g_tbl = tbl->tbl;
	if (!g_tbl)
		return -1;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		return -1;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		return -1;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		data->word = w_e_tbl->entry[entry_idx].data;
		if (w_e_tbl->entry[entry_idx].data)
			w_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		data->dword = dw_e_tbl->entry[entry_idx].data;
		if (dw_e_tbl->entry[entry_idx].data)
			dw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		data->qword = qw_e_tbl->entry[entry_idx].data;
		if (qw_e_tbl->entry[entry_idx].data)
			qw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		data->ptr = ptr_e_tbl->entry[entry_idx].data;
		if (ptr_e_tbl->entry[entry_idx].data)
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	}
	return 0;
}
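/*
 * The 32-bit key is decomposed into three level indices with the
 * GT/MT/ET offsets and masks used above, conceptually:
 *
 *	gt_idx = (idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK;
 *	mt_idx = (idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK;
 *	et_idx = idx & MLX5_L3T_ET_MASK;
 *
 * A lookup therefore walks global table -> middle table -> entry
 * table, and any absent sub-table makes it fail fast with -1.
 */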
int32_t
mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;
	uint64_t ref_cnt;
	int32_t ret = -1;

	rte_spinlock_lock(&tbl->sl);
	g_tbl = tbl->tbl;
	if (!g_tbl)
		goto out;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		goto out;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		goto out;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
		ret = --w_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		w_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --w_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		dw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --dw_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		qw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --qw_e_tbl->ref_cnt;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
		ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		ptr_e_tbl->entry[entry_idx].data = NULL;
		ref_cnt = --ptr_e_tbl->ref_cnt;
		break;
	}
	if (!ref_cnt) {
		mlx5_ipool_free(tbl->eip,
				((struct mlx5_l3t_entry_word *)e_tbl)->idx);
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									NULL;
		if (!(--m_tbl->ref_cnt)) {
			mlx5_free(m_tbl);
			g_tbl->tbl
			[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
									NULL;
			if (!(--g_tbl->ref_cnt)) {
				mlx5_free(g_tbl);
				tbl->tbl = NULL;
			}
		}
	}
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

static int32_t
__l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx, tbl_idx = 0;

	/* Check the global table, create it if empty. */
	g_tbl = tbl->tbl;
	if (!g_tbl) {
		g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!g_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		tbl->tbl = g_tbl;
	}
	/*
	 * Check the middle table, create it if empty. The ref_cnt will be
	 * increased if a new sub-table is created.
	 */
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl) {
		m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!m_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
									m_tbl;
		g_tbl->ref_cnt++;
	}
	/*
	 * Check the entry table, create it if empty. The ref_cnt will be
	 * increased if a new sub entry table is created.
	 */
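/*
 * Return-value sketch for mlx5_l3t_clear_entry() (illustrative): -1 means
 * the index was never populated; otherwise the value is the entry's
 * remaining reference count, so 0 signals that the last user has just
 * released it:
 *
 *	if (!mlx5_l3t_clear_entry(tbl, idx))
 *		;	// entry data wiped; empty sub-tables were reclaimed
 */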
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl) {
		e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
		if (!e_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									e_tbl;
		m_tbl->ref_cnt++;
	}
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		if (w_e_tbl->entry[entry_idx].data) {
			data->word = w_e_tbl->entry[entry_idx].data;
			w_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		w_e_tbl->entry[entry_idx].data = data->word;
		w_e_tbl->entry[entry_idx].ref_cnt = 1;
		w_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		if (dw_e_tbl->entry[entry_idx].data) {
			data->dword = dw_e_tbl->entry[entry_idx].data;
			dw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		dw_e_tbl->entry[entry_idx].data = data->dword;
		dw_e_tbl->entry[entry_idx].ref_cnt = 1;
		dw_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		if (qw_e_tbl->entry[entry_idx].data) {
			data->qword = qw_e_tbl->entry[entry_idx].data;
			qw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		qw_e_tbl->entry[entry_idx].data = data->qword;
		qw_e_tbl->entry[entry_idx].ref_cnt = 1;
		qw_e_tbl->ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		if (ptr_e_tbl->entry[entry_idx].data) {
			data->ptr = ptr_e_tbl->entry[entry_idx].data;
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		ptr_e_tbl->entry[entry_idx].data = data->ptr;
		ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
		ptr_e_tbl->ref_cnt++;
		break;
	}
	return 0;
}
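/*
 * Note on the EEXIST path in __l3t_set_entry() above: an occupied slot is
 * never overwritten. The function returns -1 with rte_errno set to EEXIST,
 * copies the existing data back to the caller and takes one extra entry
 * reference, so the caller becomes a regular user of the old entry and
 * must drop it with mlx5_l3t_clear_entry() when done.
 */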
int32_t
mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_set_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

int32_t
mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		       union mlx5_l3t_data *data,
		       mlx5_l3t_alloc_callback_fn cb, void *ctx)
{
	int32_t ret;

	rte_spinlock_lock(&tbl->sl);
	/* Check if the entry data is ready. */
	ret = __l3t_get_entry(tbl, idx, data);
	if (!ret) {
		switch (tbl->type) {
		case MLX5_L3T_TYPE_WORD:
			if (data->word)
				goto out;
			break;
		case MLX5_L3T_TYPE_DWORD:
			if (data->dword)
				goto out;
			break;
		case MLX5_L3T_TYPE_QWORD:
			if (data->qword)
				goto out;
			break;
		default:
			if (data->ptr)
				goto out;
			break;
		}
	}
	/* Entry data is not ready, use the user callback to create it. */
	ret = cb(ctx, data);
	if (ret)
		goto out;
	/* Save the newly allocated data to the entry. */
	ret = __l3t_set_entry(tbl, idx, data);
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}
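/*
 * Usage sketch for mlx5_l3t_prepare_entry() (illustrative; "my_alloc_cb"
 * is a hypothetical name): the lookup and the callback-based allocation
 * both run under the table spinlock, so concurrent callers racing on the
 * same index get the same data with one extra reference each.
 *
 *	static int32_t
 *	my_alloc_cb(void *ctx __rte_unused, union mlx5_l3t_data *data)
 *	{
 *		data->ptr = mlx5_malloc(MLX5_MEM_ZERO, 64, 0, SOCKET_ID_ANY);
 *		return data->ptr ? 0 : -1;
 *	}
 *
 *	union mlx5_l3t_data d;
 *
 *	if (!mlx5_l3t_prepare_entry(ptr_tbl, idx, &d, my_alloc_cb, NULL))
 *		;	// d.ptr is valid and referenced here
 */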