146287eacSBing Zhao /* SPDX-License-Identifier: BSD-3-Clause 246287eacSBing Zhao * Copyright 2019 Mellanox Technologies, Ltd 346287eacSBing Zhao */ 446287eacSBing Zhao 546287eacSBing Zhao #include <rte_malloc.h> 646287eacSBing Zhao 783c2047cSSuanming Mou #include <mlx5_malloc.h> 883c2047cSSuanming Mou 946287eacSBing Zhao #include "mlx5_utils.h" 1046287eacSBing Zhao 11a3cf59f5SSuanming Mou 121ff37beeSXueming Li /********************* Cache list ************************/ 131ff37beeSXueming Li 141ff37beeSXueming Li static struct mlx5_cache_entry * 151ff37beeSXueming Li mlx5_clist_default_create_cb(struct mlx5_cache_list *list, 161ff37beeSXueming Li struct mlx5_cache_entry *entry __rte_unused, 171ff37beeSXueming Li void *ctx __rte_unused) 181ff37beeSXueming Li { 191ff37beeSXueming Li return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY); 201ff37beeSXueming Li } 211ff37beeSXueming Li 221ff37beeSXueming Li static void 231ff37beeSXueming Li mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused, 241ff37beeSXueming Li struct mlx5_cache_entry *entry) 251ff37beeSXueming Li { 261ff37beeSXueming Li mlx5_free(entry); 271ff37beeSXueming Li } 281ff37beeSXueming Li 291ff37beeSXueming Li int 301ff37beeSXueming Li mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name, 311ff37beeSXueming Li uint32_t entry_size, void *ctx, 321ff37beeSXueming Li mlx5_cache_create_cb cb_create, 331ff37beeSXueming Li mlx5_cache_match_cb cb_match, 341ff37beeSXueming Li mlx5_cache_remove_cb cb_remove) 351ff37beeSXueming Li { 361ff37beeSXueming Li MLX5_ASSERT(list); 371ff37beeSXueming Li if (!cb_match || (!cb_create ^ !cb_remove)) 381ff37beeSXueming Li return -1; 391ff37beeSXueming Li if (name) 401ff37beeSXueming Li snprintf(list->name, sizeof(list->name), "%s", name); 411ff37beeSXueming Li list->entry_sz = entry_size; 421ff37beeSXueming Li list->ctx = ctx; 431ff37beeSXueming Li list->cb_create = cb_create ? 
cb_create : mlx5_clist_default_create_cb; 441ff37beeSXueming Li list->cb_match = cb_match; 451ff37beeSXueming Li list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb; 461ff37beeSXueming Li rte_rwlock_init(&list->lock); 471ff37beeSXueming Li DRV_LOG(DEBUG, "Cache list %s initialized.", list->name); 481ff37beeSXueming Li LIST_INIT(&list->head); 491ff37beeSXueming Li return 0; 501ff37beeSXueming Li } 511ff37beeSXueming Li 521ff37beeSXueming Li static struct mlx5_cache_entry * 531ff37beeSXueming Li __cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse) 541ff37beeSXueming Li { 551ff37beeSXueming Li struct mlx5_cache_entry *entry; 561ff37beeSXueming Li 571ff37beeSXueming Li LIST_FOREACH(entry, &list->head, next) { 581ff37beeSXueming Li if (list->cb_match(list, entry, ctx)) 591ff37beeSXueming Li continue; 601ff37beeSXueming Li if (reuse) { 611ff37beeSXueming Li __atomic_add_fetch(&entry->ref_cnt, 1, 621ff37beeSXueming Li __ATOMIC_RELAXED); 631ff37beeSXueming Li DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.", 641ff37beeSXueming Li list->name, (void *)entry, entry->ref_cnt); 651ff37beeSXueming Li } 661ff37beeSXueming Li break; 671ff37beeSXueming Li } 681ff37beeSXueming Li return entry; 691ff37beeSXueming Li } 701ff37beeSXueming Li 711ff37beeSXueming Li static struct mlx5_cache_entry * 721ff37beeSXueming Li cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse) 731ff37beeSXueming Li { 741ff37beeSXueming Li struct mlx5_cache_entry *entry; 751ff37beeSXueming Li 761ff37beeSXueming Li rte_rwlock_read_lock(&list->lock); 771ff37beeSXueming Li entry = __cache_lookup(list, ctx, reuse); 781ff37beeSXueming Li rte_rwlock_read_unlock(&list->lock); 791ff37beeSXueming Li return entry; 801ff37beeSXueming Li } 811ff37beeSXueming Li 821ff37beeSXueming Li struct mlx5_cache_entry * 831ff37beeSXueming Li mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx) 841ff37beeSXueming Li { 851ff37beeSXueming Li return cache_lookup(list, ctx, false); 
861ff37beeSXueming Li } 871ff37beeSXueming Li 881ff37beeSXueming Li struct mlx5_cache_entry * 891ff37beeSXueming Li mlx5_cache_register(struct mlx5_cache_list *list, void *ctx) 901ff37beeSXueming Li { 911ff37beeSXueming Li struct mlx5_cache_entry *entry; 921ff37beeSXueming Li uint32_t prev_gen_cnt = 0; 931ff37beeSXueming Li 941ff37beeSXueming Li MLX5_ASSERT(list); 951ff37beeSXueming Li prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE); 961ff37beeSXueming Li /* Lookup with read lock, reuse if found. */ 971ff37beeSXueming Li entry = cache_lookup(list, ctx, true); 981ff37beeSXueming Li if (entry) 991ff37beeSXueming Li return entry; 1001ff37beeSXueming Li /* Not found, append with write lock - block read from other threads. */ 1011ff37beeSXueming Li rte_rwlock_write_lock(&list->lock); 1021ff37beeSXueming Li /* If list changed by other threads before lock, search again. */ 1031ff37beeSXueming Li if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) { 1041ff37beeSXueming Li /* Lookup and reuse w/o read lock. 
*/ 1051ff37beeSXueming Li entry = __cache_lookup(list, ctx, true); 1061ff37beeSXueming Li if (entry) 1071ff37beeSXueming Li goto done; 1081ff37beeSXueming Li } 1091ff37beeSXueming Li entry = list->cb_create(list, entry, ctx); 1101ff37beeSXueming Li if (!entry) { 1111ff37beeSXueming Li DRV_LOG(ERR, "Failed to init cache list %s entry %p.", 1121ff37beeSXueming Li list->name, (void *)entry); 1131ff37beeSXueming Li goto done; 1141ff37beeSXueming Li } 1151ff37beeSXueming Li entry->ref_cnt = 1; 1161ff37beeSXueming Li LIST_INSERT_HEAD(&list->head, entry, next); 1171ff37beeSXueming Li __atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE); 1181ff37beeSXueming Li __atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE); 1191ff37beeSXueming Li DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.", 1201ff37beeSXueming Li list->name, (void *)entry, entry->ref_cnt); 1211ff37beeSXueming Li done: 1221ff37beeSXueming Li rte_rwlock_write_unlock(&list->lock); 1231ff37beeSXueming Li return entry; 1241ff37beeSXueming Li } 1251ff37beeSXueming Li 1261ff37beeSXueming Li int 1271ff37beeSXueming Li mlx5_cache_unregister(struct mlx5_cache_list *list, 1281ff37beeSXueming Li struct mlx5_cache_entry *entry) 1291ff37beeSXueming Li { 1301ff37beeSXueming Li rte_rwlock_write_lock(&list->lock); 1311ff37beeSXueming Li MLX5_ASSERT(entry && entry->next.le_prev); 1321ff37beeSXueming Li DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.", 1331ff37beeSXueming Li list->name, (void *)entry, entry->ref_cnt); 1341ff37beeSXueming Li if (--entry->ref_cnt) { 1351ff37beeSXueming Li rte_rwlock_write_unlock(&list->lock); 1361ff37beeSXueming Li return 1; 1371ff37beeSXueming Li } 1381ff37beeSXueming Li __atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE); 1391ff37beeSXueming Li __atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE); 1401ff37beeSXueming Li LIST_REMOVE(entry, next); 1411ff37beeSXueming Li list->cb_remove(list, entry); 1421ff37beeSXueming Li rte_rwlock_write_unlock(&list->lock); 1431ff37beeSXueming Li 
DRV_LOG(DEBUG, "Cache list %s entry %p removed.", 1441ff37beeSXueming Li list->name, (void *)entry); 1451ff37beeSXueming Li return 0; 1461ff37beeSXueming Li } 1471ff37beeSXueming Li 1481ff37beeSXueming Li void 1491ff37beeSXueming Li mlx5_cache_list_destroy(struct mlx5_cache_list *list) 1501ff37beeSXueming Li { 1511ff37beeSXueming Li struct mlx5_cache_entry *entry; 1521ff37beeSXueming Li 1531ff37beeSXueming Li MLX5_ASSERT(list); 1541ff37beeSXueming Li /* no LIST_FOREACH_SAFE, using while instead */ 1551ff37beeSXueming Li while (!LIST_EMPTY(&list->head)) { 1561ff37beeSXueming Li entry = LIST_FIRST(&list->head); 1571ff37beeSXueming Li LIST_REMOVE(entry, next); 1581ff37beeSXueming Li list->cb_remove(list, entry); 1591ff37beeSXueming Li DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.", 1601ff37beeSXueming Li list->name, (void *)entry); 1611ff37beeSXueming Li } 1621ff37beeSXueming Li memset(list, 0, sizeof(*list)); 1631ff37beeSXueming Li } 1641ff37beeSXueming Li 1651ff37beeSXueming Li uint32_t 1661ff37beeSXueming Li mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list) 1671ff37beeSXueming Li { 1681ff37beeSXueming Li MLX5_ASSERT(list); 1691ff37beeSXueming Li return __atomic_load_n(&list->count, __ATOMIC_RELAXED); 1701ff37beeSXueming Li } 1711ff37beeSXueming Li 172e69a5922SXueming Li /********************* Indexed pool **********************/ 173e69a5922SXueming Li 174a3cf59f5SSuanming Mou static inline void 175a3cf59f5SSuanming Mou mlx5_ipool_lock(struct mlx5_indexed_pool *pool) 176a3cf59f5SSuanming Mou { 177a3cf59f5SSuanming Mou if (pool->cfg.need_lock) 178d15c0946SSuanming Mou rte_spinlock_lock(&pool->rsz_lock); 179a3cf59f5SSuanming Mou } 180a3cf59f5SSuanming Mou 181a3cf59f5SSuanming Mou static inline void 182a3cf59f5SSuanming Mou mlx5_ipool_unlock(struct mlx5_indexed_pool *pool) 183a3cf59f5SSuanming Mou { 184a3cf59f5SSuanming Mou if (pool->cfg.need_lock) 185d15c0946SSuanming Mou rte_spinlock_unlock(&pool->rsz_lock); 186a3cf59f5SSuanming Mou } 
187a3cf59f5SSuanming Mou 18862d7d519SSuanming Mou static inline uint32_t 18962d7d519SSuanming Mou mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx) 19062d7d519SSuanming Mou { 19162d7d519SSuanming Mou struct mlx5_indexed_pool_config *cfg = &pool->cfg; 19262d7d519SSuanming Mou uint32_t trunk_idx = 0; 19362d7d519SSuanming Mou uint32_t i; 19462d7d519SSuanming Mou 19562d7d519SSuanming Mou if (!cfg->grow_trunk) 19662d7d519SSuanming Mou return entry_idx / cfg->trunk_size; 19762d7d519SSuanming Mou if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) { 19862d7d519SSuanming Mou trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) / 19962d7d519SSuanming Mou (cfg->trunk_size << (cfg->grow_shift * 20062d7d519SSuanming Mou cfg->grow_trunk)) + cfg->grow_trunk; 20162d7d519SSuanming Mou } else { 20262d7d519SSuanming Mou for (i = 0; i < cfg->grow_trunk; i++) { 20362d7d519SSuanming Mou if (entry_idx < pool->grow_tbl[i]) 20462d7d519SSuanming Mou break; 20562d7d519SSuanming Mou } 20662d7d519SSuanming Mou trunk_idx = i; 20762d7d519SSuanming Mou } 20862d7d519SSuanming Mou return trunk_idx; 20962d7d519SSuanming Mou } 21062d7d519SSuanming Mou 21162d7d519SSuanming Mou static inline uint32_t 21262d7d519SSuanming Mou mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx) 21362d7d519SSuanming Mou { 21462d7d519SSuanming Mou struct mlx5_indexed_pool_config *cfg = &pool->cfg; 21562d7d519SSuanming Mou 21662d7d519SSuanming Mou return cfg->trunk_size << (cfg->grow_shift * 21762d7d519SSuanming Mou (trunk_idx > cfg->grow_trunk ? 
cfg->grow_trunk : trunk_idx)); 21862d7d519SSuanming Mou } 21962d7d519SSuanming Mou 22062d7d519SSuanming Mou static inline uint32_t 22162d7d519SSuanming Mou mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx) 22262d7d519SSuanming Mou { 22362d7d519SSuanming Mou struct mlx5_indexed_pool_config *cfg = &pool->cfg; 22462d7d519SSuanming Mou uint32_t offset = 0; 22562d7d519SSuanming Mou 22662d7d519SSuanming Mou if (!trunk_idx) 22762d7d519SSuanming Mou return 0; 22862d7d519SSuanming Mou if (!cfg->grow_trunk) 22962d7d519SSuanming Mou return cfg->trunk_size * trunk_idx; 23062d7d519SSuanming Mou if (trunk_idx < cfg->grow_trunk) 23162d7d519SSuanming Mou offset = pool->grow_tbl[trunk_idx - 1]; 23262d7d519SSuanming Mou else 23362d7d519SSuanming Mou offset = pool->grow_tbl[cfg->grow_trunk - 1] + 23462d7d519SSuanming Mou (cfg->trunk_size << (cfg->grow_shift * 23562d7d519SSuanming Mou cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk); 23662d7d519SSuanming Mou return offset; 23762d7d519SSuanming Mou } 23862d7d519SSuanming Mou 239a3cf59f5SSuanming Mou struct mlx5_indexed_pool * 240a3cf59f5SSuanming Mou mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg) 241a3cf59f5SSuanming Mou { 242a3cf59f5SSuanming Mou struct mlx5_indexed_pool *pool; 24362d7d519SSuanming Mou uint32_t i; 244a3cf59f5SSuanming Mou 24579807d6aSXueming Li if (!cfg || (!cfg->malloc ^ !cfg->free) || 246d15c0946SSuanming Mou (cfg->per_core_cache && cfg->release_mem_en) || 247a3cf59f5SSuanming Mou (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) || 248a3cf59f5SSuanming Mou ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32)))) 249a3cf59f5SSuanming Mou return NULL; 25083c2047cSSuanming Mou pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk * 25183c2047cSSuanming Mou sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE, 25283c2047cSSuanming Mou SOCKET_ID_ANY); 253a3cf59f5SSuanming Mou if (!pool) 254a3cf59f5SSuanming Mou return NULL; 255a3cf59f5SSuanming Mou pool->cfg = 
*cfg; 256a3cf59f5SSuanming Mou if (!pool->cfg.trunk_size) 257a3cf59f5SSuanming Mou pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE; 258a3cf59f5SSuanming Mou if (!cfg->malloc && !cfg->free) { 25983c2047cSSuanming Mou pool->cfg.malloc = mlx5_malloc; 26083c2047cSSuanming Mou pool->cfg.free = mlx5_free; 261a3cf59f5SSuanming Mou } 262a3cf59f5SSuanming Mou if (pool->cfg.need_lock) 263d15c0946SSuanming Mou rte_spinlock_init(&pool->rsz_lock); 26462d7d519SSuanming Mou /* 26562d7d519SSuanming Mou * Initialize the dynamic grow trunk size lookup table to have a quick 26662d7d519SSuanming Mou * lookup for the trunk entry index offset. 26762d7d519SSuanming Mou */ 26862d7d519SSuanming Mou for (i = 0; i < cfg->grow_trunk; i++) { 26962d7d519SSuanming Mou pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i); 27062d7d519SSuanming Mou if (i > 0) 27162d7d519SSuanming Mou pool->grow_tbl[i] += pool->grow_tbl[i - 1]; 27262d7d519SSuanming Mou } 27358ecd3adSSuanming Mou if (!pool->cfg.max_idx) 27458ecd3adSSuanming Mou pool->cfg.max_idx = 27558ecd3adSSuanming Mou mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1); 276d15c0946SSuanming Mou if (!cfg->per_core_cache) 277d15c0946SSuanming Mou pool->free_list = TRUNK_INVALID; 278a3cf59f5SSuanming Mou return pool; 279a3cf59f5SSuanming Mou } 280a3cf59f5SSuanming Mou 281a3cf59f5SSuanming Mou static int 282a3cf59f5SSuanming Mou mlx5_ipool_grow(struct mlx5_indexed_pool *pool) 283a3cf59f5SSuanming Mou { 284a3cf59f5SSuanming Mou struct mlx5_indexed_trunk *trunk; 285a3cf59f5SSuanming Mou struct mlx5_indexed_trunk **trunk_tmp; 286a3cf59f5SSuanming Mou struct mlx5_indexed_trunk **p; 287a3cf59f5SSuanming Mou size_t trunk_size = 0; 28862d7d519SSuanming Mou size_t data_size; 289a3cf59f5SSuanming Mou size_t bmp_size; 29058ecd3adSSuanming Mou uint32_t idx, cur_max_idx, i; 291a3cf59f5SSuanming Mou 29258ecd3adSSuanming Mou cur_max_idx = mlx5_trunk_idx_offset_get(pool, pool->n_trunk_valid); 29358ecd3adSSuanming Mou if (pool->n_trunk_valid == 
TRUNK_MAX_IDX || 29458ecd3adSSuanming Mou cur_max_idx >= pool->cfg.max_idx) 295a3cf59f5SSuanming Mou return -ENOMEM; 296a3cf59f5SSuanming Mou if (pool->n_trunk_valid == pool->n_trunk) { 297a3cf59f5SSuanming Mou /* No free trunk flags, expand trunk list. */ 298a3cf59f5SSuanming Mou int n_grow = pool->n_trunk_valid ? pool->n_trunk : 299a3cf59f5SSuanming Mou RTE_CACHE_LINE_SIZE / sizeof(void *); 300a3cf59f5SSuanming Mou 30183c2047cSSuanming Mou p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) * 302a3cf59f5SSuanming Mou sizeof(struct mlx5_indexed_trunk *), 303a3cf59f5SSuanming Mou RTE_CACHE_LINE_SIZE, rte_socket_id()); 304a3cf59f5SSuanming Mou if (!p) 305a3cf59f5SSuanming Mou return -ENOMEM; 306a3cf59f5SSuanming Mou if (pool->trunks) 307a3cf59f5SSuanming Mou memcpy(p, pool->trunks, pool->n_trunk_valid * 308a3cf59f5SSuanming Mou sizeof(struct mlx5_indexed_trunk *)); 309a3cf59f5SSuanming Mou memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0, 310a3cf59f5SSuanming Mou n_grow * sizeof(void *)); 311a3cf59f5SSuanming Mou trunk_tmp = pool->trunks; 312a3cf59f5SSuanming Mou pool->trunks = p; 313a3cf59f5SSuanming Mou if (trunk_tmp) 314a3cf59f5SSuanming Mou pool->cfg.free(trunk_tmp); 315a3cf59f5SSuanming Mou pool->n_trunk += n_grow; 316a3cf59f5SSuanming Mou } 3171fd4bb67SSuanming Mou if (!pool->cfg.release_mem_en) { 318a3cf59f5SSuanming Mou idx = pool->n_trunk_valid; 3191fd4bb67SSuanming Mou } else { 3201fd4bb67SSuanming Mou /* Find the first available slot in trunk list */ 3211fd4bb67SSuanming Mou for (idx = 0; idx < pool->n_trunk; idx++) 3221fd4bb67SSuanming Mou if (pool->trunks[idx] == NULL) 3231fd4bb67SSuanming Mou break; 3241fd4bb67SSuanming Mou } 325a3cf59f5SSuanming Mou trunk_size += sizeof(*trunk); 32662d7d519SSuanming Mou data_size = mlx5_trunk_size_get(pool, idx); 32762d7d519SSuanming Mou bmp_size = rte_bitmap_get_memory_footprint(data_size); 328691b3d3eSSuanming Mou /* rte_bitmap requires memory cacheline aligned. 
*/ 329691b3d3eSSuanming Mou trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size); 330691b3d3eSSuanming Mou trunk_size += bmp_size; 33183c2047cSSuanming Mou trunk = pool->cfg.malloc(0, trunk_size, 332a3cf59f5SSuanming Mou RTE_CACHE_LINE_SIZE, rte_socket_id()); 333a3cf59f5SSuanming Mou if (!trunk) 334a3cf59f5SSuanming Mou return -ENOMEM; 335a3cf59f5SSuanming Mou pool->trunks[idx] = trunk; 336a3cf59f5SSuanming Mou trunk->idx = idx; 33762d7d519SSuanming Mou trunk->free = data_size; 338a3cf59f5SSuanming Mou trunk->prev = TRUNK_INVALID; 339a3cf59f5SSuanming Mou trunk->next = TRUNK_INVALID; 340a3cf59f5SSuanming Mou MLX5_ASSERT(pool->free_list == TRUNK_INVALID); 341a3cf59f5SSuanming Mou pool->free_list = idx; 342a3cf59f5SSuanming Mou /* Mark all entries as available. */ 343691b3d3eSSuanming Mou trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data 344691b3d3eSSuanming Mou [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)], 345691b3d3eSSuanming Mou bmp_size); 34658ecd3adSSuanming Mou /* Clear the overhead bits in the trunk if it happens. 
*/ 34758ecd3adSSuanming Mou if (cur_max_idx + data_size > pool->cfg.max_idx) { 34858ecd3adSSuanming Mou for (i = pool->cfg.max_idx - cur_max_idx; i < data_size; i++) 34958ecd3adSSuanming Mou rte_bitmap_clear(trunk->bmp, i); 35058ecd3adSSuanming Mou } 351691b3d3eSSuanming Mou MLX5_ASSERT(trunk->bmp); 352a3cf59f5SSuanming Mou pool->n_trunk_valid++; 353a3cf59f5SSuanming Mou #ifdef POOL_DEBUG 354a3cf59f5SSuanming Mou pool->trunk_new++; 355a3cf59f5SSuanming Mou pool->trunk_avail++; 356a3cf59f5SSuanming Mou #endif 357a3cf59f5SSuanming Mou return 0; 358a3cf59f5SSuanming Mou } 359a3cf59f5SSuanming Mou 360d15c0946SSuanming Mou static inline struct mlx5_indexed_cache * 361d15c0946SSuanming Mou mlx5_ipool_update_global_cache(struct mlx5_indexed_pool *pool, int cidx) 362d15c0946SSuanming Mou { 363d15c0946SSuanming Mou struct mlx5_indexed_cache *gc, *lc, *olc = NULL; 364d15c0946SSuanming Mou 365d15c0946SSuanming Mou lc = pool->cache[cidx]->lc; 366d15c0946SSuanming Mou gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED); 367d15c0946SSuanming Mou if (gc && lc != gc) { 368d15c0946SSuanming Mou mlx5_ipool_lock(pool); 369d15c0946SSuanming Mou if (lc && !(--lc->ref_cnt)) 370d15c0946SSuanming Mou olc = lc; 371d15c0946SSuanming Mou lc = pool->gc; 372d15c0946SSuanming Mou lc->ref_cnt++; 373d15c0946SSuanming Mou pool->cache[cidx]->lc = lc; 374d15c0946SSuanming Mou mlx5_ipool_unlock(pool); 375d15c0946SSuanming Mou if (olc) 376d15c0946SSuanming Mou pool->cfg.free(olc); 377d15c0946SSuanming Mou } 378d15c0946SSuanming Mou return lc; 379d15c0946SSuanming Mou } 380d15c0946SSuanming Mou 381d15c0946SSuanming Mou static uint32_t 382d15c0946SSuanming Mou mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx) 383d15c0946SSuanming Mou { 384d15c0946SSuanming Mou struct mlx5_indexed_trunk *trunk; 385d15c0946SSuanming Mou struct mlx5_indexed_cache *p, *lc, *olc = NULL; 386d15c0946SSuanming Mou size_t trunk_size = 0; 387d15c0946SSuanming Mou size_t data_size; 388d15c0946SSuanming Mou 
/*
 * Refill the per-lcore cache @p cidx from the global cache, growing the
 * global trunk array and allocating a new trunk when the global cache is
 * exhausted. On success half of the new trunk's indexes go to the global
 * cache and (almost) half to the local cache.
 *
 * @return
 *   A freshly allocated 1-based entry index, or 0 on failure.
 */
static uint32_t
mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *p, *lc, *olc = NULL;
	size_t trunk_size = 0;
	size_t data_size;
	uint32_t cur_max_idx, trunk_idx, trunk_n;
	uint32_t fetch_size, ts_idx, i;
	int n_grow;

check_again:
	p = NULL;
	fetch_size = 0;
	/*
	 * Fetch new index from global if possible. First round local
	 * cache will be NULL.
	 */
	lc = pool->cache[cidx]->lc;
	mlx5_ipool_lock(pool);
	/* Try to update local cache first. */
	if (likely(pool->gc)) {
		if (lc != pool->gc) {
			/* Re-reference the newer global cache. */
			if (lc && !(--lc->ref_cnt))
				olc = lc;
			lc = pool->gc;
			lc->ref_cnt++;
			pool->cache[cidx]->lc = lc;
		}
		if (lc->len) {
			/* Use the updated local cache to fetch index. */
			fetch_size = pool->cfg.per_core_cache >> 2;
			if (lc->len < fetch_size)
				fetch_size = lc->len;
			lc->len -= fetch_size;
			memcpy(pool->cache[cidx]->idx, &lc->idx[lc->len],
			       sizeof(uint32_t) * fetch_size);
		}
	}
	mlx5_ipool_unlock(pool);
	/* Free the dropped old cache reference outside of the lock. */
	if (unlikely(olc)) {
		pool->cfg.free(olc);
		olc = NULL;
	}
	if (fetch_size) {
		/* Serve the last fetched index, keep the rest cached. */
		pool->cache[cidx]->len = fetch_size - 1;
		return pool->cache[cidx]->idx[pool->cache[cidx]->len];
	}
	trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
			 __ATOMIC_ACQUIRE) : 0;
	trunk_n = lc ? lc->n_trunk : 0;
	cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
	/* Check if index reach maximum. */
	if (trunk_idx == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return 0;
	/* No enough space in trunk array, resize the trunks array. */
	if (trunk_idx == trunk_n) {
		n_grow = trunk_idx ? trunk_idx :
			 RTE_CACHE_LINE_SIZE / sizeof(void *);
		cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_n + n_grow);
		/* Resize the trunk array. */
		p = pool->cfg.malloc(0, ((trunk_idx + n_grow) *
			sizeof(struct mlx5_indexed_trunk *)) +
			(cur_max_idx * sizeof(uint32_t)) + sizeof(*p),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return 0;
		/* Trunk pointer table lives right after the index array. */
		p->trunks = (struct mlx5_indexed_trunk **)&p->idx[cur_max_idx];
		if (lc)
			memcpy(p->trunks, lc->trunks, trunk_idx *
			       sizeof(struct mlx5_indexed_trunk *));
#ifdef RTE_LIBRTE_MLX5_DEBUG
		memset(RTE_PTR_ADD(p->trunks, trunk_idx * sizeof(void *)), 0,
			n_grow * sizeof(void *));
#endif
		p->n_trunk_valid = trunk_idx;
		p->n_trunk = trunk_n + n_grow;
		p->len = 0;
	}
	/* Prepare the new trunk. */
	trunk_size = sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, trunk_idx);
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (unlikely(!trunk)) {
		pool->cfg.free(p);
		return 0;
	}
	trunk->idx = trunk_idx;
	trunk->free = data_size;
	mlx5_ipool_lock(pool);
	/*
	 * Double check if trunks has been updated or have available index.
	 * During the new trunk allocate, index may still be flushed to the
	 * global cache. So also need to check the pool->gc->len.
	 */
	if (pool->gc && (lc != pool->gc ||
	    lc->n_trunk_valid != trunk_idx ||
	    pool->gc->len)) {
		/* Lost the race - drop our allocations and retry. */
		mlx5_ipool_unlock(pool);
		if (p)
			pool->cfg.free(p);
		pool->cfg.free(trunk);
		goto check_again;
	}
	/* Resize the trunk array and update local cache first.  */
	if (p) {
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = p;
		lc->ref_cnt = 1;
		pool->cache[cidx]->lc = lc;
		__atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
	}
	/* Add trunk to trunks array. */
	lc->trunks[trunk_idx] = trunk;
	__atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
	/* Enqueue half of the index to global. */
	ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
	fetch_size = trunk->free >> 1;
	for (i = 0; i < fetch_size; i++)
		lc->idx[i] = ts_idx + i;
	lc->len = fetch_size;
	mlx5_ipool_unlock(pool);
	/* Copy left half - 1 to local cache index array. */
	pool->cache[cidx]->len = trunk->free - fetch_size - 1;
	ts_idx += fetch_size;
	for (i = 0; i < pool->cache[cidx]->len; i++)
		pool->cache[cidx]->idx[i] = ts_idx + i;
	if (olc)
		pool->cfg.free(olc);
	/* The last index of the trunk is returned to the caller. */
	return ts_idx + i;
}
(pool->cfg.per_core_cache * sizeof(uint32_t)), 536*64a80f1cSSuanming Mou RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 537*64a80f1cSSuanming Mou if (!pool->cache[cidx]) { 538*64a80f1cSSuanming Mou DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx); 539*64a80f1cSSuanming Mou return NULL; 540*64a80f1cSSuanming Mou } 541*64a80f1cSSuanming Mou } 542d15c0946SSuanming Mou lc = mlx5_ipool_update_global_cache(pool, cidx); 543d15c0946SSuanming Mou idx -= 1; 544d15c0946SSuanming Mou trunk_idx = mlx5_trunk_idx_get(pool, idx); 545d15c0946SSuanming Mou trunk = lc->trunks[trunk_idx]; 546d15c0946SSuanming Mou MLX5_ASSERT(trunk); 547d15c0946SSuanming Mou entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx); 548d15c0946SSuanming Mou return &trunk->data[entry_idx * pool->cfg.size]; 549d15c0946SSuanming Mou } 550d15c0946SSuanming Mou 551d15c0946SSuanming Mou static void * 552d15c0946SSuanming Mou mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx) 553d15c0946SSuanming Mou { 554d15c0946SSuanming Mou int cidx; 555d15c0946SSuanming Mou 556d15c0946SSuanming Mou cidx = rte_lcore_index(rte_lcore_id()); 557d15c0946SSuanming Mou if (unlikely(cidx == -1)) { 558d15c0946SSuanming Mou rte_errno = ENOTSUP; 559d15c0946SSuanming Mou return NULL; 560d15c0946SSuanming Mou } 561d15c0946SSuanming Mou if (unlikely(!pool->cache[cidx])) { 562d15c0946SSuanming Mou pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO, 563d15c0946SSuanming Mou sizeof(struct mlx5_ipool_per_lcore) + 564d15c0946SSuanming Mou (pool->cfg.per_core_cache * sizeof(uint32_t)), 565d15c0946SSuanming Mou RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 566d15c0946SSuanming Mou if (!pool->cache[cidx]) { 567d15c0946SSuanming Mou DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx); 568d15c0946SSuanming Mou return NULL; 569d15c0946SSuanming Mou } 570d15c0946SSuanming Mou } else if (pool->cache[cidx]->len) { 571d15c0946SSuanming Mou pool->cache[cidx]->len--; 572d15c0946SSuanming Mou *idx = 
pool->cache[cidx]->idx[pool->cache[cidx]->len]; 573d15c0946SSuanming Mou return mlx5_ipool_get_cache(pool, *idx); 574d15c0946SSuanming Mou } 575d15c0946SSuanming Mou /* Not enough idx in global cache. Keep fetching from global. */ 576d15c0946SSuanming Mou *idx = mlx5_ipool_allocate_from_global(pool, cidx); 577d15c0946SSuanming Mou if (unlikely(!(*idx))) 578d15c0946SSuanming Mou return NULL; 579d15c0946SSuanming Mou return mlx5_ipool_get_cache(pool, *idx); 580d15c0946SSuanming Mou } 581d15c0946SSuanming Mou 582d15c0946SSuanming Mou static void 583d15c0946SSuanming Mou mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx) 584d15c0946SSuanming Mou { 585d15c0946SSuanming Mou int cidx; 586d15c0946SSuanming Mou struct mlx5_ipool_per_lcore *ilc; 587d15c0946SSuanming Mou struct mlx5_indexed_cache *gc, *olc = NULL; 588d15c0946SSuanming Mou uint32_t reclaim_num = 0; 589d15c0946SSuanming Mou 590d15c0946SSuanming Mou MLX5_ASSERT(idx); 591d15c0946SSuanming Mou cidx = rte_lcore_index(rte_lcore_id()); 592d15c0946SSuanming Mou if (unlikely(cidx == -1)) { 593d15c0946SSuanming Mou rte_errno = ENOTSUP; 594d15c0946SSuanming Mou return; 595d15c0946SSuanming Mou } 596d15c0946SSuanming Mou /* 597d15c0946SSuanming Mou * When index was allocated on core A but freed on core B. In this 598d15c0946SSuanming Mou * case check if local cache on core B was allocated before. 
599d15c0946SSuanming Mou */ 600d15c0946SSuanming Mou if (unlikely(!pool->cache[cidx])) { 601d15c0946SSuanming Mou pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO, 602d15c0946SSuanming Mou sizeof(struct mlx5_ipool_per_lcore) + 603d15c0946SSuanming Mou (pool->cfg.per_core_cache * sizeof(uint32_t)), 604d15c0946SSuanming Mou RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 605d15c0946SSuanming Mou if (!pool->cache[cidx]) { 606d15c0946SSuanming Mou DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx); 607d15c0946SSuanming Mou return; 608d15c0946SSuanming Mou } 609d15c0946SSuanming Mou } 610d15c0946SSuanming Mou /* Try to enqueue to local index cache. */ 611d15c0946SSuanming Mou if (pool->cache[cidx]->len < pool->cfg.per_core_cache) { 612d15c0946SSuanming Mou pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx; 613d15c0946SSuanming Mou pool->cache[cidx]->len++; 614d15c0946SSuanming Mou return; 615d15c0946SSuanming Mou } 616d15c0946SSuanming Mou ilc = pool->cache[cidx]; 617d15c0946SSuanming Mou reclaim_num = pool->cfg.per_core_cache >> 2; 618d15c0946SSuanming Mou ilc->len -= reclaim_num; 619d15c0946SSuanming Mou /* Local index cache full, try with global index cache. 
	 */
	mlx5_ipool_lock(pool);
	gc = pool->gc;
	/*
	 * Tail of mlx5_ipool_free_cache(): switch this lcore cache to the
	 * current global cache generation before spilling indexes into it.
	 */
	if (ilc->lc != gc) {
		if (!(--ilc->lc->ref_cnt))
			olc = ilc->lc; /* Last ref: free after unlock. */
		gc->ref_cnt++;
		ilc->lc = gc;
	}
	/* Spill the reclaimed tail of the local cache to the global cache. */
	memcpy(&gc->idx[gc->len], &ilc->idx[ilc->len],
	       reclaim_num * sizeof(uint32_t));
	gc->len += reclaim_num;
	mlx5_ipool_unlock(pool);
	if (olc)
		pool->cfg.free(olc);
	/* Local cache has room again; stash the freed index there. */
	pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
	pool->cache[cidx]->len++;
}

/*
 * Allocate one entry from the indexed pool.
 *
 * On success returns the entry pointer and stores its 1-based index
 * in *idx (index 0 is reserved as "invalid"). Returns NULL when the
 * pool cannot grow or the free-trunk bitmap scan fails. Dispatches
 * to the per-lcore cached path when per_core_cache is configured.
 */
void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint64_t slab = 0;
	uint32_t iidx = 0;
	void *p;

	if (pool->cfg.per_core_cache)
		return mlx5_ipool_malloc_cache(pool, idx);
	mlx5_ipool_lock(pool);
	if (pool->free_list == TRUNK_INVALID) {
		/* If no available trunks, grow new. */
		if (mlx5_ipool_grow(pool)) {
			mlx5_ipool_unlock(pool);
			return NULL;
		}
	}
	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
	trunk = pool->trunks[pool->free_list];
	MLX5_ASSERT(trunk->free);
	/* Find a set bit in the trunk bitmap - a free slot. */
	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
		mlx5_ipool_unlock(pool);
		return NULL;
	}
	MLX5_ASSERT(slab);
	iidx += __builtin_ctzll(slab);
	MLX5_ASSERT(iidx != UINT32_MAX);
	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
	rte_bitmap_clear(trunk->bmp, iidx);
	p = &trunk->data[iidx * pool->cfg.size];
	/*
	 * The ipool index should grow continually from small to big,
	 * some features as metering only accept limited bits of index.
	 * Random index with MSB set may be rejected.
	 */
	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
	iidx += 1; /* non-zero index. */
	trunk->free--;
#ifdef POOL_DEBUG
	pool->n_entry++;
#endif
	if (!trunk->free) {
		/* Full trunk will be removed from free list in imalloc.
	 */
		MLX5_ASSERT(pool->free_list == trunk->idx);
		/* Unlink the now-full trunk from the free-trunk list. */
		pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
		trunk->prev = TRUNK_INVALID;
		trunk->next = TRUNK_INVALID;
#ifdef POOL_DEBUG
		pool->trunk_empty++;
		pool->trunk_avail--;
#endif
	}
	*idx = iidx;
	mlx5_ipool_unlock(pool);
	return p;
}

/*
 * Allocate one entry and zero its payload. Zeroing is skipped when
 * the configured entry size is 0.
 */
void *
mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry = mlx5_ipool_malloc(pool, idx);

	if (entry && pool->cfg.size)
		memset(entry, 0, pool->cfg.size);
	return entry;
}

/*
 * Return the entry with the given 1-based index to the pool.
 * Index 0 (the invalid index) is silently ignored. With per-lcore
 * caching the index goes back to the local cache instead.
 */
void
mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return;
	if (pool->cfg.per_core_cache) {
		mlx5_ipool_free_cache(pool, idx);
		return;
	}
	idx -= 1; /* Back to the 0-based internal index. */
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	/* Range check; with release_mem_en the trunk array may be sparse. */
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	/* Ignore stale trunks and double frees (bit already set = free). */
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	rte_bitmap_set(trunk->bmp, entry_idx);
	trunk->free++;
	if (pool->cfg.release_mem_en && trunk->free == mlx5_trunk_size_get
	   (pool, trunk->idx)) {
		/* Whole trunk became free: unlink it and release its memory. */
		if (pool->free_list == trunk->idx)
			pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = trunk->prev;
		if (trunk->prev != TRUNK_INVALID)
			pool->trunks[trunk->prev]->next = trunk->next;
		pool->cfg.free(trunk);
		pool->trunks[trunk_idx] = NULL;
		pool->n_trunk_valid--;
#ifdef POOL_DEBUG
		pool->trunk_avail--;
		pool->trunk_free++;
#endif
		if (pool->n_trunk_valid == 0) {
			/* Last trunk gone: drop the trunk pointer array too. */
			pool->cfg.free(pool->trunks);
			pool->trunks = NULL;
			pool->n_trunk = 0;
		}
	} else if (trunk->free == 1) {
		/* Put into free trunk list head. */
		MLX5_ASSERT(pool->free_list != trunk->idx);
		trunk->next = pool->free_list;
		trunk->prev = TRUNK_INVALID;
		if (pool->free_list != TRUNK_INVALID)
			pool->trunks[pool->free_list]->prev = trunk->idx;
		pool->free_list = trunk->idx;
#ifdef POOL_DEBUG
		pool->trunk_empty--;
		pool->trunk_avail++;
#endif
	}
#ifdef POOL_DEBUG
	pool->n_entry--;
#endif
out:
	mlx5_ipool_unlock(pool);
}

/*
 * Look up the entry for a 1-based index. Returns NULL for index 0,
 * out-of-range indexes, missing trunks or entries currently free.
 */
void *
mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	void *p = NULL;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return NULL;
	if (pool->cfg.per_core_cache)
		return mlx5_ipool_get_cache(pool, idx);
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx !=
	    trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	p = &trunk->data[entry_idx * pool->cfg.size];
out:
	mlx5_ipool_unlock(pool);
	return p;
}

/*
 * Destroy the pool and free everything it owns: trunks, per-lcore
 * caches, the old/current global caches and the pool object itself.
 * Always returns 0.
 */
int
mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk **trunks = NULL;
	struct mlx5_indexed_cache *gc = pool->gc;
	uint32_t i, n_trunk_valid = 0;

	MLX5_ASSERT(pool);
	mlx5_ipool_lock(pool);
	if (pool->cfg.per_core_cache) {
		for (i = 0; i < RTE_MAX_LCORE; i++) {
			/*
			 * Free only old global cache. Pool gc will be
			 * freed at last.
			 */
			if (pool->cache[i]) {
				if (pool->cache[i]->lc &&
				    pool->cache[i]->lc != pool->gc &&
				    (!(--pool->cache[i]->lc->ref_cnt)))
					pool->cfg.free(pool->cache[i]->lc);
				pool->cfg.free(pool->cache[i]);
			}
		}
		if (gc) {
			trunks = gc->trunks;
			n_trunk_valid = gc->n_trunk_valid;
		}
	} else {
		gc = NULL;
		trunks = pool->trunks;
		n_trunk_valid = pool->n_trunk_valid;
	}
	for (i = 0; i < n_trunk_valid; i++) {
		if (trunks[i])
			pool->cfg.free(trunks[i]);
	}
	/* Without a global cache the trunk array belongs to the pool. */
	if (!gc && trunks)
		pool->cfg.free(trunks);
	if (gc)
		pool->cfg.free(gc);
	mlx5_ipool_unlock(pool);
	mlx5_free(pool);
	return 0;
}

/*
 * Build the iteration bitmap for a per-lcore cached pool: start from
 * an all-set bitmap (every index "allocated") and clear the bits of
 * all indexes sitting in the global and per-lcore free caches, so
 * only live entries remain set. Used by mlx5_ipool_get_next().
 */
void
mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
{
	uint32_t i, j;
	struct mlx5_indexed_cache *gc;
	struct rte_bitmap *ibmp;
	uint32_t bmp_num, mem_size;

	if (!pool->cfg.per_core_cache)
		return;
	gc = pool->gc;
	if (!gc)
		return;
	/* Reset bmp.
*/ 866*64a80f1cSSuanming Mou bmp_num = mlx5_trunk_idx_offset_get(pool, gc->n_trunk_valid); 867*64a80f1cSSuanming Mou mem_size = rte_bitmap_get_memory_footprint(bmp_num); 868*64a80f1cSSuanming Mou pool->bmp_mem = pool->cfg.malloc(MLX5_MEM_ZERO, mem_size, 869*64a80f1cSSuanming Mou RTE_CACHE_LINE_SIZE, rte_socket_id()); 870*64a80f1cSSuanming Mou if (!pool->bmp_mem) { 871*64a80f1cSSuanming Mou DRV_LOG(ERR, "Ipool bitmap mem allocate failed.\n"); 872*64a80f1cSSuanming Mou return; 873*64a80f1cSSuanming Mou } 874*64a80f1cSSuanming Mou ibmp = rte_bitmap_init_with_all_set(bmp_num, pool->bmp_mem, mem_size); 875*64a80f1cSSuanming Mou if (!ibmp) { 876*64a80f1cSSuanming Mou pool->cfg.free(pool->bmp_mem); 877*64a80f1cSSuanming Mou pool->bmp_mem = NULL; 878*64a80f1cSSuanming Mou DRV_LOG(ERR, "Ipool bitmap create failed.\n"); 879*64a80f1cSSuanming Mou return; 880*64a80f1cSSuanming Mou } 881*64a80f1cSSuanming Mou pool->ibmp = ibmp; 882*64a80f1cSSuanming Mou /* Clear global cache. */ 883*64a80f1cSSuanming Mou for (i = 0; i < gc->len; i++) 884*64a80f1cSSuanming Mou rte_bitmap_clear(ibmp, gc->idx[i] - 1); 885*64a80f1cSSuanming Mou /* Clear core cache. 
	 */
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct mlx5_ipool_per_lcore *ilc = pool->cache[i];

		if (!ilc)
			continue;
		for (j = 0; j < ilc->len; j++)
			rte_bitmap_clear(ibmp, ilc->idx[j] - 1);
	}
}

/*
 * Iterator step for per-lcore cached pools. Scans the iteration
 * bitmap built by mlx5_ipool_flush_cache() from *pos, clears the
 * found bit, advances *pos past it and returns the entry pointer.
 * When the scan is exhausted the bitmap memory is released and
 * NULL is returned.
 */
static void *
mlx5_ipool_get_next_cache(struct mlx5_indexed_pool *pool, uint32_t *pos)
{
	struct rte_bitmap *ibmp;
	uint64_t slab = 0;
	uint32_t iidx = *pos;

	ibmp = pool->ibmp;
	if (!ibmp || !rte_bitmap_scan(ibmp, &iidx, &slab)) {
		/* Iteration finished: tear down the temporary bitmap. */
		if (pool->bmp_mem) {
			pool->cfg.free(pool->bmp_mem);
			pool->bmp_mem = NULL;
			pool->ibmp = NULL;
		}
		return NULL;
	}
	iidx += __builtin_ctzll(slab);
	rte_bitmap_clear(ibmp, iidx);
	iidx++; /* Convert to the external 1-based index. */
	*pos = iidx;
	return mlx5_ipool_get_cache(pool, iidx);
}

/*
 * Return the next allocated entry at or after *pos and update *pos
 * to its index. Returns NULL when no further entry exists.
 */
void *
mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos)
{
	uint32_t idx = *pos;
	void *entry;

	if (pool->cfg.per_core_cache)
		return mlx5_ipool_get_next_cache(pool, pos);
	while (idx <= mlx5_trunk_idx_offset_get(pool, pool->n_trunk)) {
		entry = mlx5_ipool_get(pool, idx);
		if (entry) {
			*pos = idx;
			return entry;
		}
		idx++;
	}
	return NULL;
}

/*
 * Print pool statistics to stdout: configuration plus, under
 * POOL_DEBUG, the entry/trunk debug counters.
 */
void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{
	printf("Pool %s entry size %u, trunks %u, %d entry per trunk, "
	       "total: %d\n",
	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
	       pool->cfg.trunk_size, pool->n_trunk_valid);
#ifdef POOL_DEBUG
	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
	       "available %u free %u\n",
	       pool->cfg.type, pool->n_entry, pool->trunk_new,
	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
#endif
}

/*
 * Create a three-level table of the given entry type. The per-type
 * entry tables are backed by an indexed pool; sets rte_errno and
 * returns NULL on invalid type or allocation failure.
 */
struct mlx5_l3t_tbl *
mlx5_l3t_create(enum mlx5_l3t_type type)
{
	struct mlx5_l3t_tbl *tbl;
	struct mlx5_indexed_pool_config l3t_ip_cfg = {
		.trunk_size = 16,
		.grow_trunk = 6,
		.grow_shift = 1,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
	};

	if (type >= MLX5_L3T_TYPE_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}
	tbl
 = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
		   SOCKET_ID_ANY);
	if (!tbl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tbl->type = type;
	/* Size the entry-table ipool according to the stored data type. */
	switch (type) {
	case MLX5_L3T_TYPE_WORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_word);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_w";
		break;
	case MLX5_L3T_TYPE_DWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw";
		break;
	case MLX5_L3T_TYPE_QWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw";
		break;
	default:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr";
		break;
	}
	rte_spinlock_init(&tbl->sl);
	tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
	if (!tbl->eip) {
		rte_errno = ENOMEM;
		mlx5_free(tbl);
		tbl = NULL;
	}
	return tbl;
}

/*
 * Destroy a three-level table: walk global -> middle -> entry tables,
 * release every entry table back to the ipool, then free the level
 * tables, the ipool and the table object. NULL-safe.
 */
void
mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	uint32_t i, j;

	if (!tbl)
		return;
	g_tbl = tbl->tbl;
	if (g_tbl) {
		for (i = 0; i < MLX5_L3T_GT_SIZE; i++) {
			m_tbl = g_tbl->tbl[i];
			if (!m_tbl)
				continue;
			for (j = 0; j < MLX5_L3T_MT_SIZE; j++) {
				if (!m_tbl->tbl[j])
					continue;
				/*
				 * All entry-table layouts share the leading
				 * idx/ref_cnt fields, so the word layout is
				 * used to reach them regardless of type.
				 */
				MLX5_ASSERT(!((struct mlx5_l3t_entry_word *)
					    m_tbl->tbl[j])->ref_cnt);
				mlx5_ipool_free(tbl->eip,
						((struct mlx5_l3t_entry_word *)
						m_tbl->tbl[j])->idx);
				m_tbl->tbl[j] = 0;
				if (!(--m_tbl->ref_cnt))
					break;
			}
			MLX5_ASSERT(!m_tbl->ref_cnt);
			mlx5_free(g_tbl->tbl[i]);
			g_tbl->tbl[i] = 0;
			if (!(--g_tbl->ref_cnt))
				break;
		}
		MLX5_ASSERT(!g_tbl->ref_cnt);
		mlx5_free(tbl->tbl);
		tbl->tbl = 0;
	}
	mlx5_ipool_destroy(tbl->eip);
	mlx5_free(tbl);
}

/*
 * Look up idx in the table (lock must be held by the caller).
 * On hit, copies the stored value into *data and, when it is
 * non-zero/non-NULL, takes an entry reference. Returns 0 when the
 * entry table exists, -1 when any level is missing.
 */
static int32_t
__l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;

	g_tbl = tbl->tbl;
	if (!g_tbl)
		return -1;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		return -1;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		return -1;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		data->word = w_e_tbl->entry[entry_idx].data;
		if (w_e_tbl->entry[entry_idx].data)
			w_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		data->dword = dw_e_tbl->entry[entry_idx].data;
		if (dw_e_tbl->entry[entry_idx].data)
			dw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		data->qword = qw_e_tbl->entry[entry_idx].data;
		if (qw_e_tbl->entry[entry_idx].data)
			qw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		data->ptr = ptr_e_tbl->entry[entry_idx].data;
		if (ptr_e_tbl->entry[entry_idx].data)
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	}
	return 0;
}

/*
 * Locked wrapper around __l3t_get_entry().
 */
int32_t
mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_get_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

/*
 * Drop one reference on the entry at idx. When the entry reference
 * count reaches zero its data is cleared and the enclosing entry,
 * middle and global tables are freed bottom-up once their own
 * reference counts drop to zero. Returns the remaining entry
 * reference count, or -1 when any table level is missing.
 */
int32_t
mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;
	uint64_t ref_cnt;
	int32_t ret = -1;

	rte_spinlock_lock(&tbl->sl);
	g_tbl = tbl->tbl;
	if (!g_tbl)
		goto out;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		goto out;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		goto out;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
		ret = --w_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out; /* Still referenced elsewhere. */
		w_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --w_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		dw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --dw_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		qw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --qw_e_tbl->ref_cnt;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
		ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		ptr_e_tbl->entry[entry_idx].data = NULL;
		ref_cnt = --ptr_e_tbl->ref_cnt;
		break;
	}
	if (!ref_cnt) {
		/* Entry table empty: release it and unwind upper levels. */
		mlx5_ipool_free(tbl->eip,
				((struct mlx5_l3t_entry_word *)e_tbl)->idx);
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									NULL;
		if (!(--m_tbl->ref_cnt)) {
			mlx5_free(m_tbl);
			g_tbl->tbl
			[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] = NULL;
			if (!(--g_tbl->ref_cnt)) {
				mlx5_free(g_tbl);
				tbl->tbl = 0;
			}
		}
	}
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

/*
 * Store a value at idx (lock must be held by the caller), creating
 * the global/middle/entry tables on demand. If the slot is already
 * occupied the existing value is returned in *data, its reference
 * count is bumped and -1/EEXIST is returned.
 */
static int32_t
__l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx, tbl_idx = 0;

	/* Check the global table, create it if empty.
	 */
	g_tbl = tbl->tbl;
	if (!g_tbl) {
		g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!g_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		tbl->tbl = g_tbl;
	}
	/*
	 * Check the middle table, create it if empty. Ref_cnt will be
	 * increased if new sub table created.
	 */
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl) {
		m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!m_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
									m_tbl;
		g_tbl->ref_cnt++;
	}
	/*
	 * Check the entry table, create it if empty. Ref_cnt will be
	 * increased if new sub entry table created.
	 */
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl) {
		e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
		if (!e_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									e_tbl;
		m_tbl->ref_cnt++;
	}
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		if (w_e_tbl->entry[entry_idx].data) {
			/* Slot taken: hand back the existing value. */
			data->word = w_e_tbl->entry[entry_idx].data;
			w_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		w_e_tbl->entry[entry_idx].data = data->word;
		w_e_tbl->entry[entry_idx].ref_cnt = 1;
		w_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		if (dw_e_tbl->entry[entry_idx].data) {
			data->dword = dw_e_tbl->entry[entry_idx].data;
			dw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		dw_e_tbl->entry[entry_idx].data = data->dword;
		dw_e_tbl->entry[entry_idx].ref_cnt = 1;
		dw_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		if (qw_e_tbl->entry[entry_idx].data) {
			data->qword = qw_e_tbl->entry[entry_idx].data;
			qw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		qw_e_tbl->entry[entry_idx].data = data->qword;
		qw_e_tbl->entry[entry_idx].ref_cnt = 1;
		qw_e_tbl->ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		if (ptr_e_tbl->entry[entry_idx].data) {
			data->ptr = ptr_e_tbl->entry[entry_idx].data;
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		ptr_e_tbl->entry[entry_idx].data = data->ptr;
		ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
		ptr_e_tbl->ref_cnt++;
		break;
	}
	return 0;
}

/*
 * Locked wrapper around __l3t_set_entry().
 */
int32_t
mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_set_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

/*
 * Get the entry at idx, lazily creating it: when no (non-zero) value
 * is stored yet, the user callback allocates the data and it is then
 * stored under the same lock, so lookup-or-create is atomic.
 */
int32_t
mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		       union mlx5_l3t_data *data,
		       mlx5_l3t_alloc_callback_fn cb, void *ctx)
{
	int32_t ret;

	rte_spinlock_lock(&tbl->sl);
	/* Check if entry data is ready. */
	ret = __l3t_get_entry(tbl, idx, data);
	if (!ret) {
		switch (tbl->type) {
		case MLX5_L3T_TYPE_WORD:
			if (data->word)
				goto out;
			break;
		case MLX5_L3T_TYPE_DWORD:
			if (data->dword)
				goto out;
			break;
		case MLX5_L3T_TYPE_QWORD:
			if (data->qword)
				goto out;
			break;
		default:
			if (data->ptr)
				goto out;
			break;
		}
	}
	/* Entry data is not ready, use user callback to create it. */
	ret = cb(ctx, data);
	if (ret)
		goto out;
	/* Save the new allocated data to entry. */
	ret = __l3t_set_entry(tbl, idx, data);
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}