/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_hash_crc.h>

#include "mlx5_utils.h"

struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size)
{
	struct mlx5_hlist *h;
	uint32_t act_size;
	uint32_t alloc_size;

	if (!size)
		return NULL;
	/* Align to the next power of 2; a 32-bit integer is enough for now. */
	if (!rte_is_power_of_2(size)) {
		act_size = rte_align32pow2(size);
		DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not a power of 2, it "
			"will be aligned to 0x%" PRIX32 ".\n", size, act_size);
	} else {
		act_size = size;
	}
	alloc_size = sizeof(struct mlx5_hlist) +
		     sizeof(struct mlx5_hlist_head) * act_size;
	/* zmalloc() zeroes the memory, so the heads need no initialization. */
	h = rte_zmalloc(name, alloc_size, RTE_CACHE_LINE_SIZE);
	if (!h) {
		DRV_LOG(ERR, "No memory for hash list %s creation\n",
			name ? name : "None");
		return NULL;
	}
	if (name)
		snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name);
	h->table_sz = act_size;
	h->mask = act_size - 1;
	DRV_LOG(DEBUG, "Hash list %s with size 0x%" PRIX32 " created.\n",
		h->name, act_size);
	return h;
}

struct mlx5_hlist_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h);
	idx = rte_hash_crc_8byte(key, 0) & h->mask;
	first = &h->heads[idx];
	LIST_FOREACH(node, first, next) {
		if (node->key == key)
			return node;
	}
	return NULL;
}

int
mlx5_hlist_insert(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h && entry);
	idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
	first = &h->heads[idx];
	/* Search the bucket directly; no need to reuse the lookup function. */
	LIST_FOREACH(node, first, next) {
		if (node->key == entry->key)
			return -EEXIST;
	}
	LIST_INSERT_HEAD(first, entry, next);
	return 0;
}

void
mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused,
		  struct mlx5_hlist_entry *entry)
{
	MLX5_ASSERT(entry && entry->next.le_prev);
	LIST_REMOVE(entry, next);
	/* Set to NULL so removing the same entry twice is caught above. */
	entry->next.le_prev = NULL;
}

void
mlx5_hlist_destroy(struct mlx5_hlist *h,
		   mlx5_hlist_destroy_callback_fn cb, void *ctx)
{
	uint32_t idx;
	struct mlx5_hlist_entry *entry;

	MLX5_ASSERT(h);
	for (idx = 0; idx < h->table_sz; ++idx) {
		/* No LIST_FOREACH_SAFE available, use a while loop instead. */
		while (!LIST_EMPTY(&h->heads[idx])) {
			entry = LIST_FIRST(&h->heads[idx]);
			LIST_REMOVE(entry, next);
			/*
			 * The user owns the whole element that contains the
			 * data, so cleanup and freeing are the user's duty:
			 * the hlist entry may not be placed at the beginning
			 * of the element (placing it first is recommended).
			 * If no callback is given, the default free function
			 * is used instead.
			 */
			if (cb)
				cb(entry, ctx);
			else
				rte_free(entry);
		}
	}
	rte_free(h);
}
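
/*
 * Illustrative usage sketch for the hash list API above; it is not part of
 * the driver. "struct sample_node" and the key value are hypothetical, and
 * the node embeds struct mlx5_hlist_entry as its first member so that the
 * default rte_free() in mlx5_hlist_destroy() frees the whole node:
 *
 *	struct sample_node {
 *		struct mlx5_hlist_entry entry;
 *		uint32_t data;
 *	};
 *
 *	struct mlx5_hlist *h = mlx5_hlist_create("sample", 64);
 *	struct sample_node *n = rte_zmalloc(NULL, sizeof(*n), 0);
 *
 *	if (h && n) {
 *		n->entry.key = 0x1234;
 *		if (mlx5_hlist_insert(h, &n->entry))
 *			rte_free(n);
 *	}
 *	... mlx5_hlist_lookup(h, 0x1234) now returns &n->entry ...
 *	mlx5_hlist_destroy(h, NULL, NULL);
 */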

static inline void
mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_lock(&pool->lock);
}

static inline void
mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_unlock(&pool->lock);
}

static inline uint32_t
mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t trunk_idx = 0;
	uint32_t i;

	if (!cfg->grow_trunk)
		return entry_idx / cfg->trunk_size;
	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
			    (cfg->trunk_size << (cfg->grow_shift *
			    cfg->grow_trunk)) + cfg->grow_trunk;
	} else {
		for (i = 0; i < cfg->grow_trunk; i++) {
			if (entry_idx < pool->grow_tbl[i])
				break;
		}
		trunk_idx = i;
	}
	return trunk_idx;
}

static inline uint32_t
mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;

	return cfg->trunk_size << (cfg->grow_shift *
	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
}

static inline uint32_t
mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t offset = 0;

	if (!trunk_idx)
		return 0;
	if (!cfg->grow_trunk)
		return cfg->trunk_size * trunk_idx;
	if (trunk_idx < cfg->grow_trunk)
		offset = pool->grow_tbl[trunk_idx - 1];
	else
		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
			 (cfg->trunk_size << (cfg->grow_shift *
			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
	return offset;
}

struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
{
	struct mlx5_indexed_pool *pool;
	uint32_t i;

	/*
	 * Custom malloc/free must come in pairs, and the trunk size must be
	 * a power of 2 that still fits together with the trunk index bits.
	 */
	if (!cfg || !cfg->size || (!cfg->malloc ^ !cfg->free) ||
	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
		return NULL;
	pool = rte_zmalloc("mlx5_ipool", sizeof(*pool) + cfg->grow_trunk *
			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE);
	if (!pool)
		return NULL;
	pool->cfg = *cfg;
	if (!pool->cfg.trunk_size)
		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
	if (!cfg->malloc && !cfg->free) {
		pool->cfg.malloc = rte_malloc_socket;
		pool->cfg.free = rte_free;
	}
	pool->free_list = TRUNK_INVALID;
	if (pool->cfg.need_lock)
		rte_spinlock_init(&pool->lock);
	/*
	 * Initialize the cumulative grow-trunk size lookup table so that the
	 * trunk entry index offset can be found quickly.
	 */
	for (i = 0; i < cfg->grow_trunk; i++) {
		pool->grow_tbl[i] = pool->cfg.trunk_size <<
				    (cfg->grow_shift * i);
		if (i > 0)
			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
	}
	return pool;
}
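
/*
 * Worked example of the grow table above (numbers are illustrative only):
 * with trunk_size = 64, grow_shift = 1 and grow_trunk = 3, the loop at the
 * end of mlx5_ipool_create() yields grow_tbl = {64, 192, 448}. Trunks 0, 1
 * and 2 then hold 64, 128 and 256 entries respectively, and every later
 * trunk holds 64 << (1 * 3) = 512 entries, which is exactly what
 * mlx5_trunk_idx_get() and mlx5_trunk_idx_offset_get() assume when mapping
 * an entry index back to its trunk and offset.
 */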

static int
mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_trunk **trunk_tmp;
	struct mlx5_indexed_trunk **p;
	size_t trunk_size = 0;
	size_t data_size;
	size_t bmp_size;
	uint32_t idx;

	if (pool->n_trunk_valid == TRUNK_MAX_IDX)
		return -ENOMEM;
	if (pool->n_trunk_valid == pool->n_trunk) {
		/* No free slots in the trunk pointer array, expand it. */
		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);

		p = pool->cfg.malloc(pool->cfg.type,
				     (pool->n_trunk_valid + n_grow) *
				     sizeof(struct mlx5_indexed_trunk *),
				     RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return -ENOMEM;
		if (pool->trunks)
			memcpy(p, pool->trunks, pool->n_trunk_valid *
			       sizeof(struct mlx5_indexed_trunk *));
		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
		trunk_tmp = pool->trunks;
		pool->trunks = p;
		if (trunk_tmp)
			pool->cfg.free(trunk_tmp);
		pool->n_trunk += n_grow;
	}
	if (!pool->cfg.release_mem_en) {
		idx = pool->n_trunk_valid;
	} else {
		/* Find the first available slot in the trunk list. */
		for (idx = 0; idx < pool->n_trunk; idx++)
			if (pool->trunks[idx] == NULL)
				break;
	}
	/* Allocate the trunk header, entry data and bitmap in one block. */
	trunk_size += sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, idx);
	bmp_size = rte_bitmap_get_memory_footprint(data_size);
	trunk_size += data_size * pool->cfg.size + bmp_size;
	trunk = pool->cfg.malloc(pool->cfg.type, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!trunk)
		return -ENOMEM;
	pool->trunks[idx] = trunk;
	trunk->idx = idx;
	trunk->free = data_size;
	trunk->prev = TRUNK_INVALID;
	trunk->next = TRUNK_INVALID;
	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
	pool->free_list = idx;
	/* Mark all entries as available. */
	trunk->bmp = rte_bitmap_init_with_all_set(data_size,
		     &trunk->data[data_size * pool->cfg.size], bmp_size);
	pool->n_trunk_valid++;
#ifdef POOL_DEBUG
	pool->trunk_new++;
	pool->trunk_avail++;
#endif
	return 0;
}

void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint64_t slab = 0;
	uint32_t iidx = 0;
	void *p;

	mlx5_ipool_lock(pool);
	if (pool->free_list == TRUNK_INVALID) {
		/* No trunk with free entries, grow a new one. */
		if (mlx5_ipool_grow(pool)) {
			mlx5_ipool_unlock(pool);
			return NULL;
		}
	}
	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
	trunk = pool->trunks[pool->free_list];
	MLX5_ASSERT(trunk->free);
	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
		mlx5_ipool_unlock(pool);
		return NULL;
	}
	MLX5_ASSERT(slab);
	iidx += __builtin_ctzll(slab);
	MLX5_ASSERT(iidx != UINT32_MAX);
	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
	rte_bitmap_clear(trunk->bmp, iidx);
	p = &trunk->data[iidx * pool->cfg.size];
	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
	iidx += 1; /* Return a non-zero, 1-based index. */
	trunk->free--;
#ifdef POOL_DEBUG
	pool->n_entry++;
#endif
	if (!trunk->free) {
		/* The trunk is full now, remove it from the free list. */
		MLX5_ASSERT(pool->free_list == trunk->idx);
		pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
		trunk->prev = TRUNK_INVALID;
		trunk->next = TRUNK_INVALID;
#ifdef POOL_DEBUG
		pool->trunk_empty++;
		pool->trunk_avail--;
#endif
	}
	*idx = iidx;
	mlx5_ipool_unlock(pool);
	return p;
}
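
/*
 * Allocate an entry from the pool like mlx5_ipool_malloc() does, but return
 * it zero-filled.
 */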
void *
mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry = mlx5_ipool_malloc(pool, idx);

	if (entry)
		memset(entry, 0, pool->cfg.size);
	return entry;
}

void
mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return;
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	rte_bitmap_set(trunk->bmp, entry_idx);
	trunk->free++;
	if (pool->cfg.release_mem_en &&
	    trunk->free == mlx5_trunk_size_get(pool, trunk->idx)) {
		/* The trunk is empty again, release its memory. */
		if (pool->free_list == trunk->idx)
			pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = trunk->prev;
		if (trunk->prev != TRUNK_INVALID)
			pool->trunks[trunk->prev]->next = trunk->next;
		pool->cfg.free(trunk);
		pool->trunks[trunk_idx] = NULL;
		pool->n_trunk_valid--;
#ifdef POOL_DEBUG
		pool->trunk_avail--;
		pool->trunk_free++;
#endif
		if (pool->n_trunk_valid == 0) {
			pool->cfg.free(pool->trunks);
			pool->trunks = NULL;
			pool->n_trunk = 0;
		}
	} else if (trunk->free == 1) {
		/* Put the trunk at the head of the free list. */
		MLX5_ASSERT(pool->free_list != trunk->idx);
		trunk->next = pool->free_list;
		trunk->prev = TRUNK_INVALID;
		if (pool->free_list != TRUNK_INVALID)
			pool->trunks[pool->free_list]->prev = trunk->idx;
		pool->free_list = trunk->idx;
#ifdef POOL_DEBUG
		pool->trunk_empty--;
		pool->trunk_avail++;
#endif
	}
#ifdef POOL_DEBUG
	pool->n_entry--;
#endif
out:
	mlx5_ipool_unlock(pool);
}
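
/*
 * Translate a 1-based pool index back into the entry pointer. Return NULL
 * for index 0 or for an index that does not reference a currently
 * allocated entry.
 */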
void *
mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	void *p = NULL;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return NULL;
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	p = &trunk->data[entry_idx * pool->cfg.size];
out:
	mlx5_ipool_unlock(pool);
	return p;
}

int
mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk **trunks;
	uint32_t i;

	MLX5_ASSERT(pool);
	mlx5_ipool_lock(pool);
	trunks = pool->trunks;
	for (i = 0; i < pool->n_trunk; i++) {
		if (trunks[i])
			pool->cfg.free(trunks[i]);
	}
	if (pool->trunks)
		pool->cfg.free(pool->trunks);
	mlx5_ipool_unlock(pool);
	rte_free(pool);
	return 0;
}

void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{
	printf("Pool %s entry size %u, trunks %u, %u entries per trunk, "
	       "total: %u\n",
	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
	       pool->cfg.trunk_size, pool->n_trunk_valid);
#ifdef POOL_DEBUG
	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
	       "available %u free %u\n",
	       pool->cfg.type, pool->n_entry, pool->trunk_new,
	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
#endif
}
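
/*
 * Illustrative usage sketch for the indexed pool API; it is not part of the
 * driver and the configuration values are hypothetical. Entries are
 * addressed by the 1-based index returned through the idx argument:
 *
 *	struct mlx5_indexed_pool_config cfg = {
 *		.size = sizeof(uint64_t),
 *		.trunk_size = 64,
 *		.need_lock = 1,
 *		.type = "sample_ipool",
 *	};
 *	uint32_t idx;
 *	struct mlx5_indexed_pool *p = mlx5_ipool_create(&cfg);
 *	uint64_t *e = p ? mlx5_ipool_zmalloc(p, &idx) : NULL;
 *
 *	if (e) {
 *		*e = 42;
 *		MLX5_ASSERT(mlx5_ipool_get(p, idx) == e);
 *		mlx5_ipool_free(p, idx);
 *	}
 *	if (p)
 *		mlx5_ipool_destroy(p);
 */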