/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>

#include <mlx5_malloc.h>

#include "mlx5_utils.h"


/********************* Cache list ************************/

static struct mlx5_cache_entry *
mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
			     struct mlx5_cache_entry *entry __rte_unused,
			     void *ctx __rte_unused)
{
	return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
}

static void
mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
			     struct mlx5_cache_entry *entry)
{
	mlx5_free(entry);
}

int
mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
		     uint32_t entry_size, void *ctx,
		     mlx5_cache_create_cb cb_create,
		     mlx5_cache_match_cb cb_match,
		     mlx5_cache_remove_cb cb_remove)
{
	MLX5_ASSERT(list);
	if (!cb_match || (!cb_create ^ !cb_remove))
		return -1;
	if (name)
		snprintf(list->name, sizeof(list->name), "%s", name);
	list->entry_sz = entry_size;
	list->ctx = ctx;
	list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
	list->cb_match = cb_match;
	list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
	rte_rwlock_init(&list->lock);
	DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
	LIST_INIT(&list->head);
	return 0;
}
static struct mlx5_cache_entry *
__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
{
	struct mlx5_cache_entry *entry;

	LIST_FOREACH(entry, &list->head, next) {
		if (list->cb_match(list, entry, ctx))
			continue;
		if (reuse) {
			__atomic_add_fetch(&entry->ref_cnt, 1,
					   __ATOMIC_RELAXED);
			DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
				list->name, (void *)entry, entry->ref_cnt);
		}
		break;
	}
	return entry;
}

static struct mlx5_cache_entry *
cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
{
	struct mlx5_cache_entry *entry;

	rte_rwlock_read_lock(&list->lock);
	entry = __cache_lookup(list, ctx, reuse);
	rte_rwlock_read_unlock(&list->lock);
	return entry;
}

struct mlx5_cache_entry *
mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
{
	return cache_lookup(list, ctx, false);
}

struct mlx5_cache_entry *
mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
{
	struct mlx5_cache_entry *entry;
	uint32_t prev_gen_cnt = 0;

	MLX5_ASSERT(list);
	prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
	/* Lookup with read lock held, reuse if found. */
	entry = cache_lookup(list, ctx, true);
	if (entry)
		return entry;
	/* Not found, append with write lock - this blocks readers on other threads. */
	rte_rwlock_write_lock(&list->lock);
	/* If the list was changed by other threads before the lock, search again. */
	if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
		/* Lookup and reuse w/o taking the read lock. */
		entry = __cache_lookup(list, ctx, true);
		if (entry)
			goto done;
	}
	entry = list->cb_create(list, entry, ctx);
	if (!entry) {
		DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
			list->name, (void *)entry);
		goto done;
	}
	entry->ref_cnt = 1;
	LIST_INSERT_HEAD(&list->head, entry, next);
	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
	__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
	DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
		list->name, (void *)entry, entry->ref_cnt);
done:
	rte_rwlock_write_unlock(&list->lock);
	return entry;
}
int
mlx5_cache_unregister(struct mlx5_cache_list *list,
		      struct mlx5_cache_entry *entry)
{
	rte_rwlock_write_lock(&list->lock);
	MLX5_ASSERT(entry && entry->next.le_prev);
	DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
		list->name, (void *)entry, entry->ref_cnt);
	if (--entry->ref_cnt) {
		rte_rwlock_write_unlock(&list->lock);
		return 1;
	}
	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
	__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
	LIST_REMOVE(entry, next);
	list->cb_remove(list, entry);
	rte_rwlock_write_unlock(&list->lock);
	DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
		list->name, (void *)entry);
	return 0;
}

void
mlx5_cache_list_destroy(struct mlx5_cache_list *list)
{
	struct mlx5_cache_entry *entry;

	MLX5_ASSERT(list);
	/* No LIST_FOREACH_SAFE, use a while loop instead. */
	while (!LIST_EMPTY(&list->head)) {
		entry = LIST_FIRST(&list->head);
		LIST_REMOVE(entry, next);
		list->cb_remove(list, entry);
		DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
			list->name, (void *)entry);
	}
	memset(list, 0, sizeof(*list));
}

uint32_t
mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
{
	MLX5_ASSERT(list);
	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
}
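/*
 * Illustrative sketch only, not part of the driver: one way a caller could
 * use the cache list above. The "clist_example_entry" type and the callbacks
 * are hypothetical; real users embed struct mlx5_cache_entry as the first
 * member of their own entry type, exactly as below.
 */
struct clist_example_entry {
	struct mlx5_cache_entry entry; /* Must be the first member. */
	uint32_t key;
};

static int
clist_example_match_cb(struct mlx5_cache_list *list __rte_unused,
		       struct mlx5_cache_entry *entry, void *ctx)
{
	/* Return 0 on match, non-zero otherwise. */
	return ((struct clist_example_entry *)entry)->key != *(uint32_t *)ctx;
}

static struct mlx5_cache_entry *
clist_example_create_cb(struct mlx5_cache_list *list __rte_unused,
			struct mlx5_cache_entry *entry __rte_unused,
			void *ctx)
{
	struct clist_example_entry *e = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*e),
						    0, SOCKET_ID_ANY);

	if (!e)
		return NULL;
	e->key = *(uint32_t *)ctx;
	return &e->entry;
}

static void
clist_example_remove_cb(struct mlx5_cache_list *list __rte_unused,
			struct mlx5_cache_entry *entry)
{
	mlx5_free(entry);
}

static void __rte_unused
clist_example_usage(void)
{
	struct mlx5_cache_list list;
	uint32_t key = 42;
	struct mlx5_cache_entry *e;

	/* entry_size is only used by the default create callback. */
	mlx5_cache_list_init(&list, "example",
			     sizeof(struct clist_example_entry), NULL,
			     clist_example_create_cb, clist_example_match_cb,
			     clist_example_remove_cb);
	/* The first register creates the entry; later ones only take a ref. */
	e = mlx5_cache_register(&list, &key);
	if (e)
		mlx5_cache_unregister(&list, e); /* Drop the reference. */
	mlx5_cache_list_destroy(&list);
}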
/********************* Indexed pool **********************/

static inline void
mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_lock(&pool->rsz_lock);
}

static inline void
mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_unlock(&pool->rsz_lock);
}

static inline uint32_t
mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t trunk_idx = 0;
	uint32_t i;

	if (!cfg->grow_trunk)
		return entry_idx / cfg->trunk_size;
	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
			    (cfg->trunk_size << (cfg->grow_shift *
			    cfg->grow_trunk)) + cfg->grow_trunk;
	} else {
		for (i = 0; i < cfg->grow_trunk; i++) {
			if (entry_idx < pool->grow_tbl[i])
				break;
		}
		trunk_idx = i;
	}
	return trunk_idx;
}

static inline uint32_t
mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;

	return cfg->trunk_size << (cfg->grow_shift *
	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
}
static inline uint32_t
mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t offset = 0;

	if (!trunk_idx)
		return 0;
	if (!cfg->grow_trunk)
		return cfg->trunk_size * trunk_idx;
	if (trunk_idx < cfg->grow_trunk)
		offset = pool->grow_tbl[trunk_idx - 1];
	else
		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
			 (cfg->trunk_size << (cfg->grow_shift *
			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
	return offset;
}
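/*
 * Worked example of the trunk geometry implemented by the three helpers
 * above (the numbers are illustrative, not a configuration used by the
 * driver): with trunk_size = 16, grow_shift = 1 and grow_trunk = 3, the
 * trunk sizes are 16, 32, 64 and then 128 for every further trunk. The
 * cumulative grow_tbl built in mlx5_ipool_create() is {16, 48, 112}, so:
 *   - mlx5_trunk_size_get(pool, 1)       -> 16 << (1 * 1) = 32
 *   - mlx5_trunk_idx_offset_get(pool, 3) -> 112
 *   - mlx5_trunk_idx_offset_get(pool, 4) -> 112 + 128 * 1 = 240
 *   - mlx5_trunk_idx_get(pool, 200)      -> (200 - 112) / 128 + 3 = 3
 */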
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
{
	struct mlx5_indexed_pool *pool;
	uint32_t i;

	if (!cfg || (!cfg->malloc ^ !cfg->free) ||
	    (cfg->per_core_cache && cfg->release_mem_en) ||
	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
		return NULL;
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
			   SOCKET_ID_ANY);
	if (!pool)
		return NULL;
	pool->cfg = *cfg;
	if (!pool->cfg.trunk_size)
		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
	if (!cfg->malloc && !cfg->free) {
		pool->cfg.malloc = mlx5_malloc;
		pool->cfg.free = mlx5_free;
	}
	if (pool->cfg.need_lock)
		rte_spinlock_init(&pool->rsz_lock);
	/*
	 * Initialize the dynamic grow trunk size lookup table to have a
	 * quick lookup for the trunk entry index offset.
	 */
	for (i = 0; i < cfg->grow_trunk; i++) {
		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
		if (i > 0)
			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
	}
	if (!pool->cfg.max_idx)
		pool->cfg.max_idx =
			mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1);
	if (!cfg->per_core_cache)
		pool->free_list = TRUNK_INVALID;
	return pool;
}
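/*
 * Illustrative sketch only: a minimal configuration for the create call
 * above. The field values are hypothetical; real configurations live with
 * the pool users.
 */
static void __rte_unused
ipool_example_create(void)
{
	struct mlx5_indexed_pool_config cfg = {
		.size = sizeof(uint64_t),	/* Entry size in bytes. */
		.trunk_size = 64,		/* Must be a power of two. */
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.per_core_cache = 0, /* Non-zero enables per-lcore caches. */
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "example_ipool",
	};
	struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);

	if (pool)
		mlx5_ipool_destroy(pool);
}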
static int
mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_trunk **trunk_tmp;
	struct mlx5_indexed_trunk **p;
	size_t trunk_size = 0;
	size_t data_size;
	size_t bmp_size;
	uint32_t idx, cur_max_idx, i;

	cur_max_idx = mlx5_trunk_idx_offset_get(pool, pool->n_trunk_valid);
	if (pool->n_trunk_valid == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return -ENOMEM;
	if (pool->n_trunk_valid == pool->n_trunk) {
		/* No free trunk slots left, expand the trunk list. */
		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);

		p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
				     sizeof(struct mlx5_indexed_trunk *),
				     RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return -ENOMEM;
		if (pool->trunks)
			memcpy(p, pool->trunks, pool->n_trunk_valid *
			       sizeof(struct mlx5_indexed_trunk *));
		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
		trunk_tmp = pool->trunks;
		pool->trunks = p;
		if (trunk_tmp)
			pool->cfg.free(trunk_tmp);
		pool->n_trunk += n_grow;
	}
	if (!pool->cfg.release_mem_en) {
		idx = pool->n_trunk_valid;
	} else {
		/* Find the first available slot in the trunk list. */
		for (idx = 0; idx < pool->n_trunk; idx++)
			if (pool->trunks[idx] == NULL)
				break;
	}
	trunk_size += sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, idx);
	bmp_size = rte_bitmap_get_memory_footprint(data_size);
	/* rte_bitmap requires the memory to be cache-line aligned. */
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk_size += bmp_size;
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!trunk)
		return -ENOMEM;
	pool->trunks[idx] = trunk;
	trunk->idx = idx;
	trunk->free = data_size;
	trunk->prev = TRUNK_INVALID;
	trunk->next = TRUNK_INVALID;
	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
	pool->free_list = idx;
	/* Mark all entries as available. */
	trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
		     [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
		     bmp_size);
	/* Clear the overhead bits past max_idx in the last trunk, if any. */
	if (cur_max_idx + data_size > pool->cfg.max_idx) {
		for (i = pool->cfg.max_idx - cur_max_idx; i < data_size; i++)
			rte_bitmap_clear(trunk->bmp, i);
	}
	MLX5_ASSERT(trunk->bmp);
	pool->n_trunk_valid++;
#ifdef POOL_DEBUG
	pool->trunk_new++;
	pool->trunk_avail++;
#endif
	return 0;
}
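/*
 * Layout of one trunk as allocated above (a single allocation):
 *
 *   +--------------------+----------------------------+------------+
 *   | struct             | entry data                 | rte_bitmap |
 *   | mlx5_indexed_trunk | data_size * pool->cfg.size | bmp_size   |
 *   +--------------------+----------------------------+------------+
 *
 * The data area length is rounded up to a cache line so that the bitmap
 * placed right after it meets rte_bitmap's cache-line alignment
 * requirement.
 */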
static inline struct mlx5_indexed_cache *
mlx5_ipool_update_global_cache(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_cache *gc, *lc, *olc = NULL;

	lc = pool->cache[cidx]->lc;
	gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
	if (gc && lc != gc) {
		mlx5_ipool_lock(pool);
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = pool->gc;
		lc->ref_cnt++;
		pool->cache[cidx]->lc = lc;
		mlx5_ipool_unlock(pool);
		if (olc)
			pool->cfg.free(olc);
	}
	return lc;
}

static uint32_t
mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *p, *lc, *olc = NULL;
	size_t trunk_size = 0;
	size_t data_size;
	uint32_t cur_max_idx, trunk_idx, trunk_n;
	uint32_t fetch_size, ts_idx, i;
	int n_grow;

check_again:
	p = NULL;
	fetch_size = 0;
	/*
	 * Try to fetch new indices from the global cache. On the first
	 * round the local cache is still NULL.
	 */
	lc = pool->cache[cidx]->lc;
	mlx5_ipool_lock(pool);
	/* Try to update the local cache first. */
	if (likely(pool->gc)) {
		if (lc != pool->gc) {
			if (lc && !(--lc->ref_cnt))
				olc = lc;
			lc = pool->gc;
			lc->ref_cnt++;
			pool->cache[cidx]->lc = lc;
		}
		if (lc->len) {
			/* Use the updated local cache to fetch indices. */
			fetch_size = pool->cfg.per_core_cache >> 2;
			if (lc->len < fetch_size)
				fetch_size = lc->len;
			lc->len -= fetch_size;
			memcpy(pool->cache[cidx]->idx, &lc->idx[lc->len],
			       sizeof(uint32_t) * fetch_size);
		}
	}
	mlx5_ipool_unlock(pool);
	if (unlikely(olc)) {
		pool->cfg.free(olc);
		olc = NULL;
	}
	if (fetch_size) {
		pool->cache[cidx]->len = fetch_size - 1;
		return pool->cache[cidx]->idx[pool->cache[cidx]->len];
	}
	trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
			 __ATOMIC_ACQUIRE) : 0;
	trunk_n = lc ? lc->n_trunk : 0;
	cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
	/* Check if the index reaches the maximum. */
	if (trunk_idx == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return 0;
	/* Not enough space in the trunk array; resize it. */
	if (trunk_idx == trunk_n) {
		n_grow = trunk_idx ? trunk_idx :
			 RTE_CACHE_LINE_SIZE / sizeof(void *);
		cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_n + n_grow);
		/* Resize the trunk array. */
		p = pool->cfg.malloc(0, ((trunk_idx + n_grow) *
			sizeof(struct mlx5_indexed_trunk *)) +
			(cur_max_idx * sizeof(uint32_t)) + sizeof(*p),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return 0;
		p->trunks = (struct mlx5_indexed_trunk **)&p->idx[cur_max_idx];
		if (lc)
			memcpy(p->trunks, lc->trunks, trunk_idx *
			       sizeof(struct mlx5_indexed_trunk *));
#ifdef RTE_LIBRTE_MLX5_DEBUG
		memset(RTE_PTR_ADD(p->trunks, trunk_idx * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
#endif
		p->n_trunk_valid = trunk_idx;
		p->n_trunk = trunk_n + n_grow;
		p->len = 0;
	}
	/* Prepare the new trunk. */
	trunk_size = sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, trunk_idx);
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (unlikely(!trunk)) {
		pool->cfg.free(p);
		return 0;
	}
	trunk->idx = trunk_idx;
	trunk->free = data_size;
	mlx5_ipool_lock(pool);
	/*
	 * Double check whether the trunk array has been updated or indices
	 * have become available. While the new trunk was being allocated,
	 * indices may have been flushed to the global cache, so
	 * pool->gc->len needs to be checked as well.
	 */
	if (pool->gc && (lc != pool->gc ||
	    lc->n_trunk_valid != trunk_idx ||
	    pool->gc->len)) {
		mlx5_ipool_unlock(pool);
		if (p)
			pool->cfg.free(p);
		pool->cfg.free(trunk);
		goto check_again;
	}
	/* Resize the trunk array and update the local cache first. */
	if (p) {
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = p;
		lc->ref_cnt = 1;
		pool->cache[cidx]->lc = lc;
		__atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
	}
	/* Add the trunk to the trunk array. */
	lc->trunks[trunk_idx] = trunk;
	__atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
	/* Enqueue half of the indices to the global cache. */
	ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
	fetch_size = trunk->free >> 1;
	for (i = 0; i < fetch_size; i++)
		lc->idx[i] = ts_idx + i;
	lc->len = fetch_size;
	mlx5_ipool_unlock(pool);
	/*
	 * Copy the remaining half minus one to the local cache index
	 * array; the last index is returned to the caller.
	 */
	pool->cache[cidx]->len = trunk->free - fetch_size - 1;
	ts_idx += fetch_size;
	for (i = 0; i < pool->cache[cidx]->len; i++)
		pool->cache[cidx]->idx[i] = ts_idx + i;
	if (olc)
		pool->cfg.free(olc);
	return ts_idx + i;
}
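/*
 * Worked example for the index split above (illustrative numbers): for a
 * fresh trunk with trunk->free = 128 entries starting at 1-based index
 * ts_idx, 64 indices go to the global cache (lc->idx), 63 are kept in the
 * calling lcore's cache, and the last one (ts_idx + 127) is returned to
 * the caller directly.
 */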
static void *
mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *lc;
	uint32_t trunk_idx;
	uint32_t entry_idx;
	int cidx;

	MLX5_ASSERT(idx);
	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	lc = mlx5_ipool_update_global_cache(pool, cidx);
	idx -= 1;
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	trunk = lc->trunks[trunk_idx];
	MLX5_ASSERT(trunk);
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx);
	return &trunk->data[entry_idx * pool->cfg.size];
}
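/*
 * Note: the lookup above takes no lock on the data path. This is safe
 * under the scheme used here because existing slots in a published trunk
 * array are never changed (new trunks are only appended under the pool
 * lock, and resizing allocates a whole new mlx5_indexed_cache), while the
 * local cache pins its copy through lc->ref_cnt until the lcore moves to
 * a newer one.
 */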
static void *
mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
			return NULL;
		}
	} else if (pool->cache[cidx]->len) {
		pool->cache[cidx]->len--;
		*idx = pool->cache[cidx]->idx[pool->cache[cidx]->len];
		return mlx5_ipool_get_cache(pool, *idx);
	}
	/* The local cache is empty; fetch more indices from the global cache. */
	*idx = mlx5_ipool_allocate_from_global(pool, cidx);
	if (unlikely(!(*idx)))
		return NULL;
	return mlx5_ipool_get_cache(pool, *idx);
}
static void
mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	int cidx;
	struct mlx5_ipool_per_lcore *ilc;
	struct mlx5_indexed_cache *gc, *olc = NULL;
	uint32_t reclaim_num = 0;

	MLX5_ASSERT(idx);
	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		rte_errno = ENOTSUP;
		return;
	}
	/*
	 * The index may have been allocated on core A but freed on core B.
	 * In that case, check whether the local cache on core B has been
	 * allocated yet.
	 */
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
			return;
		}
	}
	/* Try to enqueue to the local index cache. */
	if (pool->cache[cidx]->len < pool->cfg.per_core_cache) {
		pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
		pool->cache[cidx]->len++;
		return;
	}
	/* The local index cache is full; reclaim part of it to the global cache. */
	ilc = pool->cache[cidx];
	reclaim_num = pool->cfg.per_core_cache >> 2;
	ilc->len -= reclaim_num;
	mlx5_ipool_lock(pool);
	gc = pool->gc;
	if (ilc->lc != gc) {
		if (!(--ilc->lc->ref_cnt))
			olc = ilc->lc;
		gc->ref_cnt++;
		ilc->lc = gc;
	}
	memcpy(&gc->idx[gc->len], &ilc->idx[ilc->len],
	       reclaim_num * sizeof(uint32_t));
	gc->len += reclaim_num;
	mlx5_ipool_unlock(pool);
	if (olc)
		pool->cfg.free(olc);
	pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
	pool->cache[cidx]->len++;
}
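/*
 * Worked example for the reclaim above (illustrative numbers): with
 * per_core_cache = 256, a free on a core whose local cache already holds
 * 256 indices moves 64 of them (one quarter) back to the global cache
 * under the lock, then stores the freed index locally.
 */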
void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint64_t slab = 0;
	uint32_t iidx = 0;
	void *p;

	if (pool->cfg.per_core_cache)
		return mlx5_ipool_malloc_cache(pool, idx);
	mlx5_ipool_lock(pool);
	if (pool->free_list == TRUNK_INVALID) {
		/* No available trunks, grow a new one. */
		if (mlx5_ipool_grow(pool)) {
			mlx5_ipool_unlock(pool);
			return NULL;
		}
	}
	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
	trunk = pool->trunks[pool->free_list];
	MLX5_ASSERT(trunk->free);
	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
		mlx5_ipool_unlock(pool);
		return NULL;
	}
	MLX5_ASSERT(slab);
	iidx += __builtin_ctzll(slab);
	MLX5_ASSERT(iidx != UINT32_MAX);
	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
	rte_bitmap_clear(trunk->bmp, iidx);
	p = &trunk->data[iidx * pool->cfg.size];
	/*
	 * The ipool index should grow continually from small to big;
	 * some features such as metering accept only a limited number of
	 * index bits, so a random index with high bits set may be rejected.
	 */
	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
	iidx += 1; /* non-zero index. */
	trunk->free--;
#ifdef POOL_DEBUG
	pool->n_entry++;
#endif
	if (!trunk->free) {
		/* The trunk is full; remove it from the free trunk list. */
		MLX5_ASSERT(pool->free_list == trunk->idx);
		pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
		trunk->prev = TRUNK_INVALID;
		trunk->next = TRUNK_INVALID;
#ifdef POOL_DEBUG
		pool->trunk_empty++;
		pool->trunk_avail--;
#endif
	}
	*idx = iidx;
	mlx5_ipool_unlock(pool);
	return p;
}

void *
mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry = mlx5_ipool_malloc(pool, idx);

	if (entry && pool->cfg.size)
		memset(entry, 0, pool->cfg.size);
	return entry;
}
void
mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return;
	if (pool->cfg.per_core_cache) {
		mlx5_ipool_free_cache(pool, idx);
		return;
	}
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	rte_bitmap_set(trunk->bmp, entry_idx);
	trunk->free++;
	if (pool->cfg.release_mem_en && trunk->free == mlx5_trunk_size_get
	    (pool, trunk->idx)) {
		if (pool->free_list == trunk->idx)
			pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = trunk->prev;
		if (trunk->prev != TRUNK_INVALID)
			pool->trunks[trunk->prev]->next = trunk->next;
		pool->cfg.free(trunk);
		pool->trunks[trunk_idx] = NULL;
		pool->n_trunk_valid--;
#ifdef POOL_DEBUG
		pool->trunk_avail--;
		pool->trunk_free++;
#endif
		if (pool->n_trunk_valid == 0) {
			pool->cfg.free(pool->trunks);
			pool->trunks = NULL;
			pool->n_trunk = 0;
		}
	} else if (trunk->free == 1) {
		/* Put it at the free trunk list head. */
		MLX5_ASSERT(pool->free_list != trunk->idx);
		trunk->next = pool->free_list;
		trunk->prev = TRUNK_INVALID;
		if (pool->free_list != TRUNK_INVALID)
			pool->trunks[pool->free_list]->prev = trunk->idx;
		pool->free_list = trunk->idx;
#ifdef POOL_DEBUG
		pool->trunk_empty--;
		pool->trunk_avail++;
#endif
	}
#ifdef POOL_DEBUG
	pool->n_entry--;
#endif
out:
	mlx5_ipool_unlock(pool);
}

void *
mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	void *p = NULL;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return NULL;
	if (pool->cfg.per_core_cache)
		return mlx5_ipool_get_cache(pool, idx);
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	p = &trunk->data[entry_idx * pool->cfg.size];
out:
	mlx5_ipool_unlock(pool);
	return p;
}
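/*
 * Illustrative sketch only: an allocation round trip on a pool created
 * with mlx5_ipool_create() and an entry size of uint64_t. Indices are
 * 1-based; 0 means failure, which is why mlx5_ipool_malloc() can report
 * errors through the index alone.
 */
static void __rte_unused
ipool_example_usage(struct mlx5_indexed_pool *pool)
{
	uint32_t idx = 0;
	uint64_t *entry = mlx5_ipool_zmalloc(pool, &idx);

	if (!entry)
		return;
	*entry = 0xcafe;
	/* The index, not the pointer, is what users store and pass around. */
	MLX5_ASSERT(mlx5_ipool_get(pool, idx) == entry);
	mlx5_ipool_free(pool, idx);
}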
int
mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk **trunks = NULL;
	struct mlx5_indexed_cache *gc = pool->gc;
	uint32_t i, n_trunk_valid = 0;

	MLX5_ASSERT(pool);
	mlx5_ipool_lock(pool);
	if (pool->cfg.per_core_cache) {
		for (i = 0; i < RTE_MAX_LCORE; i++) {
			/*
			 * Free only the stale global cache copies here.
			 * The pool gc itself is freed last.
			 */
			if (pool->cache[i]) {
				if (pool->cache[i]->lc &&
				    pool->cache[i]->lc != pool->gc &&
				    (!(--pool->cache[i]->lc->ref_cnt)))
					pool->cfg.free(pool->cache[i]->lc);
				pool->cfg.free(pool->cache[i]);
			}
		}
		if (gc) {
			trunks = gc->trunks;
			n_trunk_valid = gc->n_trunk_valid;
		}
	} else {
		gc = NULL;
		trunks = pool->trunks;
		n_trunk_valid = pool->n_trunk_valid;
	}
	for (i = 0; i < n_trunk_valid; i++) {
		if (trunks[i])
			pool->cfg.free(trunks[i]);
	}
	if (!gc && trunks)
		pool->cfg.free(trunks);
	if (gc)
		pool->cfg.free(gc);
	mlx5_ipool_unlock(pool);
	mlx5_free(pool);
	return 0;
}

void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{
	printf("Pool %s entry size %u, trunks %u, %d entry per trunk, "
	       "total: %d\n",
	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
	       pool->cfg.trunk_size, pool->n_trunk_valid);
#ifdef POOL_DEBUG
	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
	       "available %u free %u\n",
	       pool->cfg.type, pool->n_entry, pool->trunk_new,
	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
#endif
}
887bd81eaebSSuanming Mou case MLX5_L3T_TYPE_DWORD: 8880796c7b1SSuanming Mou l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword); 889bd81eaebSSuanming Mou l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw"; 890bd81eaebSSuanming Mou break; 891bd81eaebSSuanming Mou case MLX5_L3T_TYPE_QWORD: 8920796c7b1SSuanming Mou l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword); 893bd81eaebSSuanming Mou l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw"; 894bd81eaebSSuanming Mou break; 895bd81eaebSSuanming Mou default: 8960796c7b1SSuanming Mou l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr); 897bd81eaebSSuanming Mou l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr"; 898bd81eaebSSuanming Mou break; 899bd81eaebSSuanming Mou } 9000796c7b1SSuanming Mou rte_spinlock_init(&tbl->sl); 901bd81eaebSSuanming Mou tbl->eip = mlx5_ipool_create(&l3t_ip_cfg); 902bd81eaebSSuanming Mou if (!tbl->eip) { 903bd81eaebSSuanming Mou rte_errno = ENOMEM; 90483c2047cSSuanming Mou mlx5_free(tbl); 905bd81eaebSSuanming Mou tbl = NULL; 906bd81eaebSSuanming Mou } 907bd81eaebSSuanming Mou return tbl; 908bd81eaebSSuanming Mou } 909bd81eaebSSuanming Mou 910bd81eaebSSuanming Mou void 911bd81eaebSSuanming Mou mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl) 912bd81eaebSSuanming Mou { 913bd81eaebSSuanming Mou struct mlx5_l3t_level_tbl *g_tbl, *m_tbl; 914bd81eaebSSuanming Mou uint32_t i, j; 915bd81eaebSSuanming Mou 916bd81eaebSSuanming Mou if (!tbl) 917bd81eaebSSuanming Mou return; 918bd81eaebSSuanming Mou g_tbl = tbl->tbl; 919bd81eaebSSuanming Mou if (g_tbl) { 920bd81eaebSSuanming Mou for (i = 0; i < MLX5_L3T_GT_SIZE; i++) { 921bd81eaebSSuanming Mou m_tbl = g_tbl->tbl[i]; 922bd81eaebSSuanming Mou if (!m_tbl) 923bd81eaebSSuanming Mou continue; 924bd81eaebSSuanming Mou for (j = 0; j < MLX5_L3T_MT_SIZE; j++) { 925bd81eaebSSuanming Mou if (!m_tbl->tbl[j]) 926bd81eaebSSuanming Mou continue; 927bd81eaebSSuanming Mou MLX5_ASSERT(!((struct mlx5_l3t_entry_word *) 928bd81eaebSSuanming Mou m_tbl->tbl[j])->ref_cnt); 929bd81eaebSSuanming Mou mlx5_ipool_free(tbl->eip, 930bd81eaebSSuanming Mou ((struct mlx5_l3t_entry_word *) 931bd81eaebSSuanming Mou m_tbl->tbl[j])->idx); 932bd81eaebSSuanming Mou m_tbl->tbl[j] = 0; 933bd81eaebSSuanming Mou if (!(--m_tbl->ref_cnt)) 934bd81eaebSSuanming Mou break; 935bd81eaebSSuanming Mou } 936bd81eaebSSuanming Mou MLX5_ASSERT(!m_tbl->ref_cnt); 93783c2047cSSuanming Mou mlx5_free(g_tbl->tbl[i]); 938bd81eaebSSuanming Mou g_tbl->tbl[i] = 0; 939bd81eaebSSuanming Mou if (!(--g_tbl->ref_cnt)) 940bd81eaebSSuanming Mou break; 941bd81eaebSSuanming Mou } 942bd81eaebSSuanming Mou MLX5_ASSERT(!g_tbl->ref_cnt); 94383c2047cSSuanming Mou mlx5_free(tbl->tbl); 944bd81eaebSSuanming Mou tbl->tbl = 0; 945bd81eaebSSuanming Mou } 946bd81eaebSSuanming Mou mlx5_ipool_destroy(tbl->eip); 94783c2047cSSuanming Mou mlx5_free(tbl); 948bd81eaebSSuanming Mou } 949bd81eaebSSuanming Mou 9500796c7b1SSuanming Mou static int32_t 9510796c7b1SSuanming Mou __l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx, 952bd81eaebSSuanming Mou union mlx5_l3t_data *data) 953bd81eaebSSuanming Mou { 954bd81eaebSSuanming Mou struct mlx5_l3t_level_tbl *g_tbl, *m_tbl; 9550796c7b1SSuanming Mou struct mlx5_l3t_entry_word *w_e_tbl; 9560796c7b1SSuanming Mou struct mlx5_l3t_entry_dword *dw_e_tbl; 9570796c7b1SSuanming Mou struct mlx5_l3t_entry_qword *qw_e_tbl; 9580796c7b1SSuanming Mou struct mlx5_l3t_entry_ptr *ptr_e_tbl; 959bd81eaebSSuanming Mou void *e_tbl; 960bd81eaebSSuanming Mou uint32_t entry_idx; 961bd81eaebSSuanming Mou 962bd81eaebSSuanming Mou g_tbl = tbl->tbl; 963bd81eaebSSuanming Mou if (!g_tbl) 
964bd81eaebSSuanming Mou return -1; 965bd81eaebSSuanming Mou m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK]; 966bd81eaebSSuanming Mou if (!m_tbl) 967bd81eaebSSuanming Mou return -1; 968bd81eaebSSuanming Mou e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK]; 969bd81eaebSSuanming Mou if (!e_tbl) 970bd81eaebSSuanming Mou return -1; 971bd81eaebSSuanming Mou entry_idx = idx & MLX5_L3T_ET_MASK; 972bd81eaebSSuanming Mou switch (tbl->type) { 973bd81eaebSSuanming Mou case MLX5_L3T_TYPE_WORD: 9740796c7b1SSuanming Mou w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl; 9750796c7b1SSuanming Mou data->word = w_e_tbl->entry[entry_idx].data; 9760796c7b1SSuanming Mou if (w_e_tbl->entry[entry_idx].data) 9770796c7b1SSuanming Mou w_e_tbl->entry[entry_idx].ref_cnt++; 978bd81eaebSSuanming Mou break; 979bd81eaebSSuanming Mou case MLX5_L3T_TYPE_DWORD: 9800796c7b1SSuanming Mou dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl; 9810796c7b1SSuanming Mou data->dword = dw_e_tbl->entry[entry_idx].data; 9820796c7b1SSuanming Mou if (dw_e_tbl->entry[entry_idx].data) 9830796c7b1SSuanming Mou dw_e_tbl->entry[entry_idx].ref_cnt++; 984bd81eaebSSuanming Mou break; 985bd81eaebSSuanming Mou case MLX5_L3T_TYPE_QWORD: 9860796c7b1SSuanming Mou qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl; 9870796c7b1SSuanming Mou data->qword = qw_e_tbl->entry[entry_idx].data; 9880796c7b1SSuanming Mou if (qw_e_tbl->entry[entry_idx].data) 9890796c7b1SSuanming Mou qw_e_tbl->entry[entry_idx].ref_cnt++; 990bd81eaebSSuanming Mou break; 991bd81eaebSSuanming Mou default: 9920796c7b1SSuanming Mou ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl; 9930796c7b1SSuanming Mou data->ptr = ptr_e_tbl->entry[entry_idx].data; 9940796c7b1SSuanming Mou if (ptr_e_tbl->entry[entry_idx].data) 9950796c7b1SSuanming Mou ptr_e_tbl->entry[entry_idx].ref_cnt++; 996bd81eaebSSuanming Mou break; 997bd81eaebSSuanming Mou } 998bd81eaebSSuanming Mou return 0; 999bd81eaebSSuanming Mou } 1000bd81eaebSSuanming Mou 10010796c7b1SSuanming Mou int32_t 10020796c7b1SSuanming Mou mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx, 10030796c7b1SSuanming Mou union mlx5_l3t_data *data) 10040796c7b1SSuanming Mou { 10050796c7b1SSuanming Mou int ret; 10060796c7b1SSuanming Mou 10070796c7b1SSuanming Mou rte_spinlock_lock(&tbl->sl); 10080796c7b1SSuanming Mou ret = __l3t_get_entry(tbl, idx, data); 10090796c7b1SSuanming Mou rte_spinlock_unlock(&tbl->sl); 10100796c7b1SSuanming Mou return ret; 10110796c7b1SSuanming Mou } 10120796c7b1SSuanming Mou 10130796c7b1SSuanming Mou int32_t 1014bd81eaebSSuanming Mou mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx) 1015bd81eaebSSuanming Mou { 1016bd81eaebSSuanming Mou struct mlx5_l3t_level_tbl *g_tbl, *m_tbl; 1017bd81eaebSSuanming Mou struct mlx5_l3t_entry_word *w_e_tbl; 1018bd81eaebSSuanming Mou struct mlx5_l3t_entry_dword *dw_e_tbl; 1019bd81eaebSSuanming Mou struct mlx5_l3t_entry_qword *qw_e_tbl; 1020bd81eaebSSuanming Mou struct mlx5_l3t_entry_ptr *ptr_e_tbl; 1021bd81eaebSSuanming Mou void *e_tbl; 1022bd81eaebSSuanming Mou uint32_t entry_idx; 1023bd81eaebSSuanming Mou uint64_t ref_cnt; 10240796c7b1SSuanming Mou int32_t ret = -1; 1025bd81eaebSSuanming Mou 10260796c7b1SSuanming Mou rte_spinlock_lock(&tbl->sl); 1027bd81eaebSSuanming Mou g_tbl = tbl->tbl; 1028bd81eaebSSuanming Mou if (!g_tbl) 10290796c7b1SSuanming Mou goto out; 1030bd81eaebSSuanming Mou m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK]; 1031bd81eaebSSuanming Mou if (!m_tbl) 10320796c7b1SSuanming Mou goto out; 1033bd81eaebSSuanming Mou 
int32_t
mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;
	uint64_t ref_cnt;
	int32_t ret = -1;

	rte_spinlock_lock(&tbl->sl);
	g_tbl = tbl->tbl;
	if (!g_tbl)
		goto out;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		goto out;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		goto out;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
		ret = --w_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		w_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --w_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		dw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --dw_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		qw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --qw_e_tbl->ref_cnt;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
		ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		ptr_e_tbl->entry[entry_idx].data = NULL;
		ref_cnt = --ptr_e_tbl->ref_cnt;
		break;
	}
	if (!ref_cnt) {
		mlx5_ipool_free(tbl->eip,
				((struct mlx5_l3t_entry_word *)e_tbl)->idx);
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									NULL;
		if (!(--m_tbl->ref_cnt)) {
			mlx5_free(m_tbl);
			g_tbl->tbl
			[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] = NULL;
			if (!(--g_tbl->ref_cnt)) {
				mlx5_free(g_tbl);
				tbl->tbl = 0;
			}
		}
	}
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}
static int32_t
__l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx, tbl_idx = 0;

	/* Check the global table, create it if empty. */
	g_tbl = tbl->tbl;
	if (!g_tbl) {
		g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!g_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		tbl->tbl = g_tbl;
	}
	/*
	 * Check the middle table, create it if empty. The ref_cnt is
	 * increased when a new sub-table is created.
	 */
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl) {
		m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!m_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
									m_tbl;
		g_tbl->ref_cnt++;
	}
	/*
	 * Check the entry table, create it if empty. The ref_cnt is
	 * increased when a new sub-table is created.
	 */
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl) {
		e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
		if (!e_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									e_tbl;
		m_tbl->ref_cnt++;
	}
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		if (w_e_tbl->entry[entry_idx].data) {
			data->word = w_e_tbl->entry[entry_idx].data;
			w_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		w_e_tbl->entry[entry_idx].data = data->word;
		w_e_tbl->entry[entry_idx].ref_cnt = 1;
		w_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		if (dw_e_tbl->entry[entry_idx].data) {
			data->dword = dw_e_tbl->entry[entry_idx].data;
			dw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		dw_e_tbl->entry[entry_idx].data = data->dword;
		dw_e_tbl->entry[entry_idx].ref_cnt = 1;
		dw_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		if (qw_e_tbl->entry[entry_idx].data) {
			data->qword = qw_e_tbl->entry[entry_idx].data;
			qw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		qw_e_tbl->entry[entry_idx].data = data->qword;
		qw_e_tbl->entry[entry_idx].ref_cnt = 1;
		qw_e_tbl->ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		if (ptr_e_tbl->entry[entry_idx].data) {
			data->ptr = ptr_e_tbl->entry[entry_idx].data;
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		ptr_e_tbl->entry[entry_idx].data = data->ptr;
		ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
		ptr_e_tbl->ref_cnt++;
		break;
	}
	return 0;
}
int32_t
mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_set_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

int32_t
mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		       union mlx5_l3t_data *data,
		       mlx5_l3t_alloc_callback_fn cb, void *ctx)
{
	int32_t ret;

	rte_spinlock_lock(&tbl->sl);
	/* Check if the entry data is ready. */
	ret = __l3t_get_entry(tbl, idx, data);
	if (!ret) {
		switch (tbl->type) {
		case MLX5_L3T_TYPE_WORD:
			if (data->word)
				goto out;
			break;
		case MLX5_L3T_TYPE_DWORD:
			if (data->dword)
				goto out;
			break;
		case MLX5_L3T_TYPE_QWORD:
			if (data->qword)
				goto out;
			break;
		default:
			if (data->ptr)
				goto out;
			break;
		}
	}
	/* The entry data is not ready, use the user callback to create it. */
	ret = cb(ctx, data);
	if (ret)
		goto out;
	/* Save the newly allocated data to the entry. */
	ret = __l3t_set_entry(tbl, idx, data);
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}
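/*
 * Illustrative sketch only: typical use of the prepare API above. The
 * callback and its context are hypothetical stand-ins for real producers
 * of entry data.
 */
static int32_t
l3t_example_alloc_cb(void *ctx __rte_unused, union mlx5_l3t_data *data)
{
	/* Produce the value to be stored at the index; 0 means success. */
	data->dword = 0xbeef;
	return 0;
}

static void __rte_unused
l3t_example_usage(void)
{
	struct mlx5_l3t_tbl *tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
	union mlx5_l3t_data data;

	if (!tbl)
		return;
	/* Returns existing data or allocates it through the callback. */
	if (!mlx5_l3t_prepare_entry(tbl, 100, &data, l3t_example_alloc_cb,
				    NULL))
		mlx5_l3t_clear_entry(tbl, 100); /* Drop the reference. */
	mlx5_l3t_destroy(tbl);
}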