/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_hash_crc.h>

#include "mlx5_utils.h"

struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size)
{
	struct mlx5_hlist *h;
	uint32_t act_size;
	uint32_t alloc_size;

	if (!size)
		return NULL;
	/* Align to the next power of 2, a 32-bit integer is enough for now. */
	if (!rte_is_power_of_2(size)) {
		act_size = rte_align32pow2(size);
		DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will "
			"be aligned to 0x%" PRIX32 ".\n", size, act_size);
	} else {
		act_size = size;
	}
	alloc_size = sizeof(struct mlx5_hlist) +
		     sizeof(struct mlx5_hlist_head) * act_size;
	/* Using zmalloc, so there is no need to initialize the heads. */
	h = rte_zmalloc(name, alloc_size, RTE_CACHE_LINE_SIZE);
	if (!h) {
		DRV_LOG(ERR, "No memory for hash list %s creation\n",
			name ? name : "None");
		return NULL;
	}
	if (name)
		snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name);
	h->table_sz = act_size;
	h->mask = act_size - 1;
	DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.\n",
		h->name, act_size);
	return h;
}

struct mlx5_hlist_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h);
	idx = rte_hash_crc_8byte(key, 0) & h->mask;
	first = &h->heads[idx];
	LIST_FOREACH(node, first, next) {
		if (node->key == key)
			return node;
	}
	return NULL;
}

int
mlx5_hlist_insert(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
{
	uint32_t idx;
	struct mlx5_hlist_head *first;
	struct mlx5_hlist_entry *node;

	MLX5_ASSERT(h && entry);
	idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
	first = &h->heads[idx];
	/* No need to reuse the lookup function. */
	LIST_FOREACH(node, first, next) {
		if (node->key == entry->key)
			return -EEXIST;
	}
	LIST_INSERT_HEAD(first, entry, next);
	return 0;
}

void
mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused,
		  struct mlx5_hlist_entry *entry)
{
	MLX5_ASSERT(entry && entry->next.le_prev);
	LIST_REMOVE(entry, next);
	/* Set to NULL to prevent the entry from being removed twice. */
	entry->next.le_prev = NULL;
}

void
mlx5_hlist_destroy(struct mlx5_hlist *h,
		   mlx5_hlist_destroy_callback_fn cb, void *ctx)
{
	uint32_t idx;
	struct mlx5_hlist_entry *entry;

	MLX5_ASSERT(h);
	for (idx = 0; idx < h->table_sz; ++idx) {
		/* No LIST_FOREACH_SAFE, use a while loop instead. */
		while (!LIST_EMPTY(&h->heads[idx])) {
			entry = LIST_FIRST(&h->heads[idx]);
			LIST_REMOVE(entry, next);
			/*
			 * The user owns the whole element which contains the
			 * data entry, so it is the user's responsibility to
			 * clean it up and free it, because the hlist entry
			 * may not be placed at the beginning of the element
			 * (although that is suggested). Otherwise the default
			 * free function is used.
			 */
			if (cb)
				cb(entry, ctx);
			else
				rte_free(entry);
		}
	}
	rte_free(h);
}

static inline void
mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_lock(&pool->lock);
}

static inline void
mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_unlock(&pool->lock);
}

static inline uint32_t
mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t trunk_idx = 0;
	uint32_t i;

	if (!cfg->grow_trunk)
		return entry_idx / cfg->trunk_size;
	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
			    (cfg->trunk_size << (cfg->grow_shift *
			    cfg->grow_trunk)) + cfg->grow_trunk;
	} else {
		for (i = 0; i < cfg->grow_trunk; i++) {
			if (entry_idx < pool->grow_tbl[i])
				break;
		}
		trunk_idx = i;
	}
	return trunk_idx;
}

static inline uint32_t
mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;

	return cfg->trunk_size << (cfg->grow_shift *
	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
}

static inline uint32_t
mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t offset = 0;

	if (!trunk_idx)
		return 0;
	if (!cfg->grow_trunk)
		return cfg->trunk_size * trunk_idx;
	if (trunk_idx < cfg->grow_trunk)
		offset = pool->grow_tbl[trunk_idx - 1];
	else
		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
			 (cfg->trunk_size << (cfg->grow_shift *
			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
	return offset;
}

struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
{
	struct mlx5_indexed_pool *pool;
	uint32_t i;

	if (!cfg || !cfg->size || (!cfg->malloc ^ !cfg->free) ||
	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
		return NULL;
	pool = rte_zmalloc("mlx5_ipool", sizeof(*pool) + cfg->grow_trunk *
			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE);
	if (!pool)
		return NULL;
	pool->cfg = *cfg;
	if (!pool->cfg.trunk_size)
		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
	if (!cfg->malloc && !cfg->free) {
		pool->cfg.malloc = rte_malloc_socket;
		pool->cfg.free = rte_free;
	}
	pool->free_list = TRUNK_INVALID;
	if (pool->cfg.need_lock)
		rte_spinlock_init(&pool->lock);
	/*
	 * Initialize the dynamic grow trunk size lookup table to have a quick
	 * lookup for the trunk entry index offset.
	 */
	for (i = 0; i < cfg->grow_trunk; i++) {
		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
		if (i > 0)
			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
	}
	return pool;
}

static int
mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_trunk **trunk_tmp;
	struct mlx5_indexed_trunk **p;
	size_t trunk_size = 0;
	size_t data_size;
	size_t bmp_size;
	uint32_t idx;

	if (pool->n_trunk_valid == TRUNK_MAX_IDX)
		return -ENOMEM;
	if (pool->n_trunk_valid == pool->n_trunk) {
		/* No free trunk slots left, expand the trunk list. */
		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);

		p = pool->cfg.malloc(pool->cfg.type,
				     (pool->n_trunk_valid + n_grow) *
				     sizeof(struct mlx5_indexed_trunk *),
				     RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return -ENOMEM;
		if (pool->trunks)
			memcpy(p, pool->trunks, pool->n_trunk_valid *
			       sizeof(struct mlx5_indexed_trunk *));
		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
		trunk_tmp = pool->trunks;
		pool->trunks = p;
		if (trunk_tmp)
			pool->cfg.free(trunk_tmp);
		pool->n_trunk += n_grow;
	}
	if (!pool->cfg.release_mem_en) {
		idx = pool->n_trunk_valid;
	} else {
		/* Find the first available slot in the trunk list. */
		for (idx = 0; idx < pool->n_trunk; idx++)
			if (pool->trunks[idx] == NULL)
				break;
	}
	trunk_size += sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, idx);
	bmp_size = rte_bitmap_get_memory_footprint(data_size);
	/* rte_bitmap requires the memory to be cache-line aligned. */
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk_size += bmp_size;
	trunk = pool->cfg.malloc(pool->cfg.type, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!trunk)
		return -ENOMEM;
	pool->trunks[idx] = trunk;
	trunk->idx = idx;
	trunk->free = data_size;
	trunk->prev = TRUNK_INVALID;
	trunk->next = TRUNK_INVALID;
	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
	pool->free_list = idx;
	/* Mark all entries as available. */
	trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
		     [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
		     bmp_size);
	MLX5_ASSERT(trunk->bmp);
	pool->n_trunk_valid++;
#ifdef POOL_DEBUG
	pool->trunk_new++;
	pool->trunk_avail++;
#endif
	return 0;
}

void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint64_t slab = 0;
	uint32_t iidx = 0;
	void *p;

	mlx5_ipool_lock(pool);
	if (pool->free_list == TRUNK_INVALID) {
		/* If there is no available trunk, grow a new one. */
		if (mlx5_ipool_grow(pool)) {
			mlx5_ipool_unlock(pool);
			return NULL;
		}
	}
	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
	trunk = pool->trunks[pool->free_list];
	MLX5_ASSERT(trunk->free);
	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
		mlx5_ipool_unlock(pool);
		return NULL;
	}
	MLX5_ASSERT(slab);
	iidx += __builtin_ctzll(slab);
	MLX5_ASSERT(iidx != UINT32_MAX);
	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
	rte_bitmap_clear(trunk->bmp, iidx);
	p = &trunk->data[iidx * pool->cfg.size];
	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
	iidx += 1; /* Return a non-zero index. */
	trunk->free--;
#ifdef POOL_DEBUG
	pool->n_entry++;
#endif
	if (!trunk->free) {
		/* Remove the full trunk from the free trunk list. */
		MLX5_ASSERT(pool->free_list == trunk->idx);
		pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
		trunk->prev = TRUNK_INVALID;
		trunk->next = TRUNK_INVALID;
#ifdef POOL_DEBUG
		pool->trunk_empty++;
		pool->trunk_avail--;
#endif
	}
	*idx = iidx;
	mlx5_ipool_unlock(pool);
	return p;
}

void *
mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry = mlx5_ipool_malloc(pool, idx);

	if (entry)
		memset(entry, 0, pool->cfg.size);
	return entry;
}

void
mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return;
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	rte_bitmap_set(trunk->bmp, entry_idx);
	trunk->free++;
	if (pool->cfg.release_mem_en &&
	    trunk->free == mlx5_trunk_size_get(pool, trunk->idx)) {
		if (pool->free_list == trunk->idx)
			pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = trunk->prev;
		if (trunk->prev != TRUNK_INVALID)
			pool->trunks[trunk->prev]->next = trunk->next;
		pool->cfg.free(trunk);
		pool->trunks[trunk_idx] = NULL;
		pool->n_trunk_valid--;
#ifdef POOL_DEBUG
		pool->trunk_avail--;
		pool->trunk_free++;
#endif
		if (pool->n_trunk_valid == 0) {
			pool->cfg.free(pool->trunks);
			pool->trunks = NULL;
			pool->n_trunk = 0;
		}
	} else if (trunk->free == 1) {
		/* Put the trunk at the head of the free trunk list. */
		MLX5_ASSERT(pool->free_list != trunk->idx);
		trunk->next = pool->free_list;
		trunk->prev = TRUNK_INVALID;
		if (pool->free_list != TRUNK_INVALID)
			pool->trunks[pool->free_list]->prev = trunk->idx;
		pool->free_list = trunk->idx;
#ifdef POOL_DEBUG
		pool->trunk_empty--;
		pool->trunk_avail++;
#endif
	}
#ifdef POOL_DEBUG
	pool->n_entry--;
#endif
out:
	mlx5_ipool_unlock(pool);
}

void *
mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	void *p = NULL;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return NULL;
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	p = &trunk->data[entry_idx * pool->cfg.size];
out:
	mlx5_ipool_unlock(pool);
	return p;
}

int
mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk **trunks;
	uint32_t i;

	MLX5_ASSERT(pool);
	mlx5_ipool_lock(pool);
	trunks = pool->trunks;
	for (i = 0; i < pool->n_trunk; i++) {
		if (trunks[i])
			pool->cfg.free(trunks[i]);
	}
	/* Free the trunk pointer array itself if it is still allocated. */
	if (pool->trunks)
		pool->cfg.free(pool->trunks);
	mlx5_ipool_unlock(pool);
	rte_free(pool);
	return 0;
}

void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{
	printf("Pool %s entry size %u, trunks %u, %u entries per trunk, "
	       "total: %u\n",
	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
	       pool->cfg.trunk_size, pool->n_trunk_valid);
#ifdef POOL_DEBUG
	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
	       "available %u free %u\n",
	       pool->cfg.type, pool->n_entry, pool->trunk_new,
	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
#endif
}