/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>

#include <mlx5_malloc.h>

#include "mlx5_utils.h"

/********************* Indexed pool **********************/

static inline void
mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_lock(&pool->rsz_lock);
}

static inline void
mlx5_ipool_unlock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_unlock(&pool->rsz_lock);
}

static inline uint32_t
mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t trunk_idx = 0;
	uint32_t i;

	if (!cfg->grow_trunk)
		return entry_idx / cfg->trunk_size;
	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
			    (cfg->trunk_size << (cfg->grow_shift *
			    cfg->grow_trunk)) + cfg->grow_trunk;
	} else {
		for (i = 0; i < cfg->grow_trunk; i++) {
			if (entry_idx < pool->grow_tbl[i])
				break;
		}
		trunk_idx = i;
	}
	return trunk_idx;
}

static inline uint32_t
mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;

	return cfg->trunk_size << (cfg->grow_shift *
	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
}

static inline uint32_t
mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
{
	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
	uint32_t offset = 0;

	if (!trunk_idx)
		return 0;
	if (!cfg->grow_trunk)
		return cfg->trunk_size * trunk_idx;
	if (trunk_idx < cfg->grow_trunk)
		offset = pool->grow_tbl[trunk_idx - 1];
	else
		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
			 (cfg->trunk_size << (cfg->grow_shift *
			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
	return offset;
}

struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
{
	struct mlx5_indexed_pool *pool;
	uint32_t i;

	if (!cfg || (!cfg->malloc ^ !cfg->free) ||
	    (cfg->per_core_cache && cfg->release_mem_en) ||
	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
		return NULL;
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
			   SOCKET_ID_ANY);
	if (!pool)
		return NULL;
	pool->cfg = *cfg;
	if (!pool->cfg.trunk_size)
		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
	if (!cfg->malloc && !cfg->free) {
		pool->cfg.malloc = mlx5_malloc;
		pool->cfg.free = mlx5_free;
	}
	if (pool->cfg.need_lock)
		rte_spinlock_init(&pool->rsz_lock);
	/*
	 * Initialize the dynamic grow trunk size lookup table to have a quick
	 * lookup for the trunk entry index offset.
	 */
	for (i = 0; i < cfg->grow_trunk; i++) {
		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
		if (i > 0)
			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
	}
	if (!pool->cfg.max_idx)
		pool->cfg.max_idx =
			mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1);
	if (!cfg->per_core_cache)
		pool->free_list = TRUNK_INVALID;
	rte_spinlock_init(&pool->lcore_lock);
	return pool;
}

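/*
 * Worked example (illustrative only, no functional change): with the
 * configuration used for the L3 tables below (trunk_size = 16,
 * grow_shift = 1, grow_trunk = 6), the trunk sizes are 16, 32, 64, 128,
 * 256 and 512 entries for trunks 0..5, and every later trunk stays at
 * 16 << 6 = 1024 entries. The cumulative lookup table built above is
 * therefore grow_tbl[] = { 16, 48, 112, 240, 496, 1008 }, which lets
 * mlx5_trunk_idx_get() and mlx5_trunk_idx_offset_get() translate
 * between entry indices and trunk indices without walking all trunks.
 */
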
static int
mlx5_ipool_grow(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_trunk **trunk_tmp;
	struct mlx5_indexed_trunk **p;
	size_t trunk_size = 0;
	size_t data_size;
	size_t bmp_size;
	uint32_t idx, cur_max_idx, i;

	cur_max_idx = mlx5_trunk_idx_offset_get(pool, pool->n_trunk_valid);
	if (pool->n_trunk_valid == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return -ENOMEM;
	if (pool->n_trunk_valid == pool->n_trunk) {
		/* No free trunk flags, expand trunk list. */
		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
			     RTE_CACHE_LINE_SIZE / sizeof(void *);

		p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
				     sizeof(struct mlx5_indexed_trunk *),
				     RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return -ENOMEM;
		if (pool->trunks)
			memcpy(p, pool->trunks, pool->n_trunk_valid *
			       sizeof(struct mlx5_indexed_trunk *));
		memset(RTE_PTR_ADD(p, pool->n_trunk_valid * sizeof(void *)), 0,
		       n_grow * sizeof(void *));
		trunk_tmp = pool->trunks;
		pool->trunks = p;
		if (trunk_tmp)
			pool->cfg.free(trunk_tmp);
		pool->n_trunk += n_grow;
	}
	if (!pool->cfg.release_mem_en) {
		idx = pool->n_trunk_valid;
	} else {
		/* Find the first available slot in the trunk list. */
		for (idx = 0; idx < pool->n_trunk; idx++)
			if (pool->trunks[idx] == NULL)
				break;
	}
	trunk_size += sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, idx);
	bmp_size = rte_bitmap_get_memory_footprint(data_size);
	/* rte_bitmap requires cacheline-aligned memory. */
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk_size += bmp_size;
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!trunk)
		return -ENOMEM;
	pool->trunks[idx] = trunk;
	trunk->idx = idx;
	trunk->free = data_size;
	trunk->prev = TRUNK_INVALID;
	trunk->next = TRUNK_INVALID;
	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
	pool->free_list = idx;
	/* Mark all entries as available. */
	trunk->bmp = rte_bitmap_init_with_all_set(data_size, &trunk->data
		     [RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size)],
		     bmp_size);
	/* Clear the overhead bits in the trunk, if any. */
	if (cur_max_idx + data_size > pool->cfg.max_idx) {
		for (i = pool->cfg.max_idx - cur_max_idx; i < data_size; i++)
			rte_bitmap_clear(trunk->bmp, i);
	}
	MLX5_ASSERT(trunk->bmp);
	pool->n_trunk_valid++;
#ifdef POOL_DEBUG
	pool->trunk_new++;
	pool->trunk_avail++;
#endif
	return 0;
}

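/*
 * Note on the layout produced by mlx5_ipool_grow() above: each trunk is
 * a single allocation holding the trunk header, the entry data area
 * rounded up to a cache line boundary, and the rte_bitmap that tracks
 * free entries. The bitmap starts with all bits set, so every entry of
 * a fresh trunk is available.
 */
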
static inline struct mlx5_indexed_cache *
mlx5_ipool_update_global_cache(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_cache *gc, *lc, *olc = NULL;

	lc = pool->cache[cidx]->lc;
	gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
	if (gc && lc != gc) {
		mlx5_ipool_lock(pool);
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = pool->gc;
		lc->ref_cnt++;
		pool->cache[cidx]->lc = lc;
		mlx5_ipool_unlock(pool);
		if (olc)
			pool->cfg.free(olc);
	}
	return lc;
}

static uint32_t
mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *p, *lc, *olc = NULL;
	size_t trunk_size = 0;
	size_t data_size;
	uint32_t cur_max_idx, trunk_idx, trunk_n;
	uint32_t fetch_size, ts_idx, i;
	int n_grow;

check_again:
	p = NULL;
	fetch_size = 0;
	/*
	 * Fetch new index from global if possible. First round local
	 * cache will be NULL.
	 */
	lc = pool->cache[cidx]->lc;
	mlx5_ipool_lock(pool);
	/* Try to update local cache first. */
	if (likely(pool->gc)) {
		if (lc != pool->gc) {
			if (lc && !(--lc->ref_cnt))
				olc = lc;
			lc = pool->gc;
			lc->ref_cnt++;
			pool->cache[cidx]->lc = lc;
		}
		if (lc->len) {
			/* Use the updated local cache to fetch index. */
			fetch_size = pool->cfg.per_core_cache >> 2;
			if (lc->len < fetch_size)
				fetch_size = lc->len;
			lc->len -= fetch_size;
			memcpy(pool->cache[cidx]->idx, &lc->idx[lc->len],
			       sizeof(uint32_t) * fetch_size);
		}
	}
	mlx5_ipool_unlock(pool);
	if (unlikely(olc)) {
		pool->cfg.free(olc);
		olc = NULL;
	}
	if (fetch_size) {
		pool->cache[cidx]->len = fetch_size - 1;
		return pool->cache[cidx]->idx[pool->cache[cidx]->len];
	}
	trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
			 __ATOMIC_ACQUIRE) : 0;
	trunk_n = lc ? lc->n_trunk : 0;
	cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
	/* Check if the index reached the maximum. */
	if (trunk_idx == TRUNK_MAX_IDX ||
	    cur_max_idx >= pool->cfg.max_idx)
		return 0;
	/* Not enough space in the trunk array, resize it. */
	if (trunk_idx == trunk_n) {
		n_grow = trunk_idx ? trunk_idx :
			 RTE_CACHE_LINE_SIZE / sizeof(void *);
		cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_n + n_grow);
		/* Resize the trunk array. */
		p = pool->cfg.malloc(0, ((trunk_idx + n_grow) *
			sizeof(struct mlx5_indexed_trunk *)) +
			(cur_max_idx * sizeof(uint32_t)) + sizeof(*p),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (!p)
			return 0;
		p->trunks = (struct mlx5_indexed_trunk **)&p->idx[cur_max_idx];
		if (lc)
			memcpy(p->trunks, lc->trunks, trunk_idx *
			       sizeof(struct mlx5_indexed_trunk *));
#ifdef RTE_LIBRTE_MLX5_DEBUG
		memset(RTE_PTR_ADD(p->trunks, trunk_idx * sizeof(void *)), 0,
			n_grow * sizeof(void *));
#endif
		p->n_trunk_valid = trunk_idx;
		p->n_trunk = trunk_n + n_grow;
		p->len = 0;
	}
	/* Prepare the new trunk. */
	trunk_size = sizeof(*trunk);
	data_size = mlx5_trunk_size_get(pool, trunk_idx);
	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
	trunk = pool->cfg.malloc(0, trunk_size,
				 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (unlikely(!trunk)) {
		pool->cfg.free(p);
		return 0;
	}
	trunk->idx = trunk_idx;
	trunk->free = data_size;
	mlx5_ipool_lock(pool);
	/*
	 * Double-check whether the trunks have been updated or indices
	 * became available. While the new trunk was being allocated,
	 * indices may still have been flushed to the global cache, so
	 * pool->gc->len needs to be checked as well.
	 */
	if (pool->gc && (lc != pool->gc ||
	    lc->n_trunk_valid != trunk_idx ||
	    pool->gc->len)) {
		mlx5_ipool_unlock(pool);
		if (p)
			pool->cfg.free(p);
		pool->cfg.free(trunk);
		goto check_again;
	}
	/* Resize the trunk array and update local cache first. */
	if (p) {
		if (lc && !(--lc->ref_cnt))
			olc = lc;
		lc = p;
		lc->ref_cnt = 1;
		pool->cache[cidx]->lc = lc;
		__atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
	}
	/* Add trunk to trunks array. */
	lc->trunks[trunk_idx] = trunk;
	__atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
	/* Enqueue half of the index to global. */
	ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
	fetch_size = trunk->free >> 1;
	if (fetch_size > pool->cfg.per_core_cache)
		fetch_size = trunk->free - pool->cfg.per_core_cache;
	for (i = 0; i < fetch_size; i++)
		lc->idx[i] = ts_idx + i;
	lc->len = fetch_size;
	mlx5_ipool_unlock(pool);
	/* Copy the remaining half minus one to the local cache index array. */
	pool->cache[cidx]->len = trunk->free - fetch_size - 1;
	ts_idx += fetch_size;
	for (i = 0; i < pool->cache[cidx]->len; i++)
		pool->cache[cidx]->idx[i] = ts_idx + i;
	if (olc)
		pool->cfg.free(olc);
	return ts_idx + i;
}

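/*
 * Summary of the per-core cache scheme implemented above: free indices
 * migrate between the per-lcore caches and the global cache in batches
 * of per_core_cache / 4. When a brand new trunk is created, roughly
 * half of its indices are published to the global cache and the rest,
 * minus the index being returned, stays in the allocating lcore's
 * cache.
 */
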
static void *
_mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	struct mlx5_indexed_cache *lc;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	MLX5_ASSERT(idx);
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
			return NULL;
		}
	}
	lc = mlx5_ipool_update_global_cache(pool, cidx);
	idx -= 1;
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	trunk = lc->trunks[trunk_idx];
	MLX5_ASSERT(trunk);
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx);
	return &trunk->data[entry_idx * pool->cfg.size];
}

static void *
mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	void *entry;
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		cidx = RTE_MAX_LCORE;
		rte_spinlock_lock(&pool->lcore_lock);
	}
	entry = _mlx5_ipool_get_cache(pool, cidx, idx);
	if (unlikely(cidx == RTE_MAX_LCORE))
		rte_spinlock_unlock(&pool->lcore_lock);
	return entry;
}


static void *
_mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, int cidx,
			 uint32_t *idx)
{
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
			return NULL;
		}
	} else if (pool->cache[cidx]->len) {
		pool->cache[cidx]->len--;
		*idx = pool->cache[cidx]->idx[pool->cache[cidx]->len];
		return _mlx5_ipool_get_cache(pool, cidx, *idx);
	}
	/* Not enough indices in the local cache, fetch more from global. */
	*idx = mlx5_ipool_allocate_from_global(pool, cidx);
	if (unlikely(!(*idx)))
		return NULL;
	return _mlx5_ipool_get_cache(pool, cidx, *idx);
}

static void *
mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry;
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		cidx = RTE_MAX_LCORE;
		rte_spinlock_lock(&pool->lcore_lock);
	}
	entry = _mlx5_ipool_malloc_cache(pool, cidx, idx);
	if (unlikely(cidx == RTE_MAX_LCORE))
		rte_spinlock_unlock(&pool->lcore_lock);
	return entry;
}

static void
_mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
{
	struct mlx5_ipool_per_lcore *ilc;
	struct mlx5_indexed_cache *gc, *olc = NULL;
	uint32_t reclaim_num = 0;

	MLX5_ASSERT(idx);
	/*
	 * The index may have been allocated on core A but freed on core B.
	 * In that case, make sure the local cache on core B exists.
	 */
	if (unlikely(!pool->cache[cidx])) {
		pool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_ipool_per_lcore) +
			(pool->cfg.per_core_cache * sizeof(uint32_t)),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!pool->cache[cidx]) {
			DRV_LOG(ERR, "Ipool cache%d allocate failed\n", cidx);
			return;
		}
	}
	/* Try to enqueue to local index cache. */
	if (pool->cache[cidx]->len < pool->cfg.per_core_cache) {
		pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
		pool->cache[cidx]->len++;
		return;
	}
	ilc = pool->cache[cidx];
	reclaim_num = pool->cfg.per_core_cache >> 2;
	ilc->len -= reclaim_num;
	/* Local index cache full, try with global index cache. */
	mlx5_ipool_lock(pool);
	gc = pool->gc;
	if (ilc->lc != gc) {
		if (ilc->lc && !(--ilc->lc->ref_cnt))
			olc = ilc->lc;
		gc->ref_cnt++;
		ilc->lc = gc;
	}
	memcpy(&gc->idx[gc->len], &ilc->idx[ilc->len],
	       reclaim_num * sizeof(uint32_t));
	gc->len += reclaim_num;
	mlx5_ipool_unlock(pool);
	if (olc)
		pool->cfg.free(olc);
	pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
	pool->cache[cidx]->len++;
}

static void
mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	int cidx;

	cidx = rte_lcore_index(rte_lcore_id());
	if (unlikely(cidx == -1)) {
		cidx = RTE_MAX_LCORE;
		rte_spinlock_lock(&pool->lcore_lock);
	}
	_mlx5_ipool_free_cache(pool, cidx, idx);
	if (unlikely(cidx == RTE_MAX_LCORE))
		rte_spinlock_unlock(&pool->lcore_lock);
}

void *
mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint64_t slab = 0;
	uint32_t iidx = 0;
	void *p;

	if (pool->cfg.per_core_cache)
		return mlx5_ipool_malloc_cache(pool, idx);
	mlx5_ipool_lock(pool);
	if (pool->free_list == TRUNK_INVALID) {
		/* If no available trunks, grow new. */
		if (mlx5_ipool_grow(pool)) {
			mlx5_ipool_unlock(pool);
			return NULL;
		}
	}
	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
	trunk = pool->trunks[pool->free_list];
	MLX5_ASSERT(trunk->free);
	if (!rte_bitmap_scan(trunk->bmp, &iidx, &slab)) {
		mlx5_ipool_unlock(pool);
		return NULL;
	}
	MLX5_ASSERT(slab);
	iidx += rte_ctz64(slab);
	MLX5_ASSERT(iidx != UINT32_MAX);
	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
	rte_bitmap_clear(trunk->bmp, iidx);
	p = &trunk->data[iidx * pool->cfg.size];
	/*
	 * The ipool index should grow continually from small to big,
	 * since some features such as metering accept only a limited
	 * number of index bits. A random index with the MSB set may be
	 * rejected.
	 */
	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
	iidx += 1; /* non-zero index. */
	trunk->free--;
#ifdef POOL_DEBUG
	pool->n_entry++;
#endif
	if (!trunk->free) {
		/* Full trunk will be removed from free list in imalloc. */
		MLX5_ASSERT(pool->free_list == trunk->idx);
		pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = TRUNK_INVALID;
		trunk->prev = TRUNK_INVALID;
		trunk->next = TRUNK_INVALID;
#ifdef POOL_DEBUG
		pool->trunk_empty++;
		pool->trunk_avail--;
#endif
	}
	*idx = iidx;
	mlx5_ipool_unlock(pool);
	return p;
}

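/*
 * Illustrative usage sketch (not compiled here; "struct my_object" and
 * "my_pool" are hypothetical caller-side names):
 *
 *	struct mlx5_indexed_pool_config cfg = {
 *		.size = sizeof(struct my_object),
 *		.trunk_size = 64,
 *		.need_lock = 1,
 *		.type = "my_pool",
 *	};
 *	struct mlx5_indexed_pool *ipool = mlx5_ipool_create(&cfg);
 *	uint32_t idx;
 *	struct my_object *obj = mlx5_ipool_zmalloc(ipool, &idx);
 *
 * The returned index is 1-based; it can later be translated back with
 * mlx5_ipool_get(ipool, idx), released with mlx5_ipool_free(ipool, idx),
 * and the whole pool freed with mlx5_ipool_destroy(ipool).
 */
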
void *
mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
{
	void *entry = mlx5_ipool_malloc(pool, idx);

	if (entry && pool->cfg.size)
		memset(entry, 0, pool->cfg.size);
	return entry;
}

void
mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return;
	if (pool->cfg.per_core_cache) {
		mlx5_ipool_free_cache(pool, idx);
		return;
	}
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	rte_bitmap_set(trunk->bmp, entry_idx);
	trunk->free++;
	if (pool->cfg.release_mem_en && trunk->free == mlx5_trunk_size_get
	   (pool, trunk->idx)) {
		if (pool->free_list == trunk->idx)
			pool->free_list = trunk->next;
		if (trunk->next != TRUNK_INVALID)
			pool->trunks[trunk->next]->prev = trunk->prev;
		if (trunk->prev != TRUNK_INVALID)
			pool->trunks[trunk->prev]->next = trunk->next;
		pool->cfg.free(trunk);
		pool->trunks[trunk_idx] = NULL;
		pool->n_trunk_valid--;
#ifdef POOL_DEBUG
		pool->trunk_avail--;
		pool->trunk_free++;
#endif
		if (pool->n_trunk_valid == 0) {
			pool->cfg.free(pool->trunks);
			pool->trunks = NULL;
			pool->n_trunk = 0;
		}
	} else if (trunk->free == 1) {
		/* Put into free trunk list head. */
		MLX5_ASSERT(pool->free_list != trunk->idx);
		trunk->next = pool->free_list;
		trunk->prev = TRUNK_INVALID;
		if (pool->free_list != TRUNK_INVALID)
			pool->trunks[pool->free_list]->prev = trunk->idx;
		pool->free_list = trunk->idx;
#ifdef POOL_DEBUG
		pool->trunk_empty--;
		pool->trunk_avail++;
#endif
	}
#ifdef POOL_DEBUG
	pool->n_entry--;
#endif
out:
	mlx5_ipool_unlock(pool);
}

void *
mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx)
{
	struct mlx5_indexed_trunk *trunk;
	void *p = NULL;
	uint32_t trunk_idx;
	uint32_t entry_idx;

	if (!idx)
		return NULL;
	if (pool->cfg.per_core_cache)
		return mlx5_ipool_get_cache(pool, idx);
	idx -= 1;
	mlx5_ipool_lock(pool);
	trunk_idx = mlx5_trunk_idx_get(pool, idx);
	if ((!pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk_valid) ||
	    (pool->cfg.release_mem_en && trunk_idx >= pool->n_trunk))
		goto out;
	trunk = pool->trunks[trunk_idx];
	if (!trunk)
		goto out;
	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
	if (trunk_idx != trunk->idx ||
	    rte_bitmap_get(trunk->bmp, entry_idx))
		goto out;
	p = &trunk->data[entry_idx * pool->cfg.size];
out:
	mlx5_ipool_unlock(pool);
	return p;
}

int
mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
{
	struct mlx5_indexed_trunk **trunks = NULL;
	struct mlx5_indexed_cache *gc = pool->gc;
	uint32_t i, n_trunk_valid = 0;

	MLX5_ASSERT(pool);
	mlx5_ipool_lock(pool);
	if (pool->cfg.per_core_cache) {
		for (i = 0; i <= RTE_MAX_LCORE; i++) {
			/*
			 * Free only old global cache. Pool gc will be
			 * freed at last.
			 */
			if (pool->cache[i]) {
				if (pool->cache[i]->lc &&
				    pool->cache[i]->lc != pool->gc &&
				    (!(--pool->cache[i]->lc->ref_cnt)))
					pool->cfg.free(pool->cache[i]->lc);
				pool->cfg.free(pool->cache[i]);
			}
		}
		if (gc) {
			trunks = gc->trunks;
			n_trunk_valid = gc->n_trunk_valid;
		}
	} else {
		gc = NULL;
		trunks = pool->trunks;
		n_trunk_valid = pool->n_trunk_valid;
	}
	for (i = 0; i < n_trunk_valid; i++) {
		if (trunks[i])
			pool->cfg.free(trunks[i]);
	}
	if (!gc && trunks)
		pool->cfg.free(trunks);
	if (gc)
		pool->cfg.free(gc);
	mlx5_ipool_unlock(pool);
	mlx5_free(pool);
	return 0;
}

void
mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
{
	uint32_t i, j;
	struct mlx5_indexed_cache *gc;
	struct rte_bitmap *ibmp;
	uint32_t bmp_num, mem_size;

	if (!pool->cfg.per_core_cache)
		return;
	gc = pool->gc;
	if (!gc)
		return;
	/* Reset bmp. */
	bmp_num = mlx5_trunk_idx_offset_get(pool, gc->n_trunk_valid);
	mem_size = rte_bitmap_get_memory_footprint(bmp_num);
	pool->bmp_mem = pool->cfg.malloc(MLX5_MEM_ZERO, mem_size,
					 RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (!pool->bmp_mem) {
		DRV_LOG(ERR, "Ipool bitmap mem allocate failed.\n");
		return;
	}
	ibmp = rte_bitmap_init_with_all_set(bmp_num, pool->bmp_mem, mem_size);
	if (!ibmp) {
		pool->cfg.free(pool->bmp_mem);
		pool->bmp_mem = NULL;
		DRV_LOG(ERR, "Ipool bitmap create failed.\n");
		return;
	}
	pool->ibmp = ibmp;
	/* Clear global cache. */
	for (i = 0; i < gc->len; i++)
		rte_bitmap_clear(ibmp, gc->idx[i] - 1);
	/* Clear core cache. */
	for (i = 0; i < RTE_MAX_LCORE + 1; i++) {
		struct mlx5_ipool_per_lcore *ilc = pool->cache[i];

		if (!ilc)
			continue;
		for (j = 0; j < ilc->len; j++)
			rte_bitmap_clear(ibmp, ilc->idx[j] - 1);
	}
}

static void *
mlx5_ipool_get_next_cache(struct mlx5_indexed_pool *pool, uint32_t *pos)
{
	struct rte_bitmap *ibmp;
	uint64_t slab = 0;
	uint32_t iidx = *pos;

	ibmp = pool->ibmp;
	if (!ibmp || !rte_bitmap_scan(ibmp, &iidx, &slab)) {
		if (pool->bmp_mem) {
			pool->cfg.free(pool->bmp_mem);
			pool->bmp_mem = NULL;
			pool->ibmp = NULL;
		}
		return NULL;
	}
	iidx += rte_ctz64(slab);
	rte_bitmap_clear(ibmp, iidx);
	iidx++;
	*pos = iidx;
	return mlx5_ipool_get_cache(pool, iidx);
}

void *
mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos)
{
	uint32_t idx = *pos;
	void *entry;

	if (pool->cfg.per_core_cache)
		return mlx5_ipool_get_next_cache(pool, pos);
	while (idx <= mlx5_trunk_idx_offset_get(pool, pool->n_trunk)) {
		entry = mlx5_ipool_get(pool, idx);
		if (entry) {
			*pos = idx;
			return entry;
		}
		idx++;
	}
	return NULL;
}

int
mlx5_ipool_resize(struct mlx5_indexed_pool *pool, uint32_t num_entries)
{
	uint32_t cur_max_idx;
	uint32_t max_index = mlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1);

	if (num_entries % pool->cfg.trunk_size) {
		DRV_LOG(ERR, "num_entries must be a multiple of trunk_size(=%u)\n",
			pool->cfg.trunk_size);
		return -EINVAL;
	}

	mlx5_ipool_lock(pool);
	cur_max_idx = pool->cfg.max_idx + num_entries;
	/* If the ipool max idx is above maximum or uint overflow occurred. */
	if (cur_max_idx > max_index || cur_max_idx < num_entries) {
		DRV_LOG(ERR, "Ipool resize failed\n");
		DRV_LOG(ERR, "Adding %u entries makes the total %u, which exceeds the limit(=%u)\n",
			num_entries, cur_max_idx, max_index);
		mlx5_ipool_unlock(pool);
		return -EINVAL;
	}

	/* Update maximum entries number. */
	pool->cfg.max_idx = cur_max_idx;
	mlx5_ipool_unlock(pool);
	return 0;
}

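/*
 * Illustrative iteration sketch ("ipool" and "entry" are hypothetical
 * caller-side names): walk all allocated entries of a pool.
 *
 *	uint32_t pos = 0;
 *	void *entry;
 *
 *	mlx5_ipool_flush_cache(ipool);	// no-op unless per_core_cache is set
 *	while ((entry = mlx5_ipool_get_next(ipool, &pos)) != NULL) {
 *		// visit "entry", whose 1-based index is "pos"
 *		pos++;
 *	}
 */
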
void
mlx5_ipool_dump(struct mlx5_indexed_pool *pool)
{
	printf("Pool %s entry size %u, trunks %u, %d entries per trunk, "
	       "total: %d\n",
	       pool->cfg.type, pool->cfg.size, pool->n_trunk_valid,
	       pool->cfg.trunk_size, pool->n_trunk_valid);
#ifdef POOL_DEBUG
	printf("Pool %s entry %u, trunk alloc %u, empty: %u, "
	       "available %u free %u\n",
	       pool->cfg.type, pool->n_entry, pool->trunk_new,
	       pool->trunk_empty, pool->trunk_avail, pool->trunk_free);
#endif
}

struct mlx5_l3t_tbl *
mlx5_l3t_create(enum mlx5_l3t_type type)
{
	struct mlx5_l3t_tbl *tbl;
	struct mlx5_indexed_pool_config l3t_ip_cfg = {
		.trunk_size = 16,
		.grow_trunk = 6,
		.grow_shift = 1,
		.need_lock = 0,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
	};

	if (type >= MLX5_L3T_TYPE_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}
	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
			  SOCKET_ID_ANY);
	if (!tbl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tbl->type = type;
	switch (type) {
	case MLX5_L3T_TYPE_WORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_word);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_w";
		break;
	case MLX5_L3T_TYPE_DWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_dword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_dw";
		break;
	case MLX5_L3T_TYPE_QWORD:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_qword);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_qw";
		break;
	default:
		l3t_ip_cfg.size = sizeof(struct mlx5_l3t_entry_ptr);
		l3t_ip_cfg.type = "mlx5_l3t_e_tbl_tpr";
		break;
	}
	rte_spinlock_init(&tbl->sl);
	tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
	if (!tbl->eip) {
		rte_errno = ENOMEM;
		mlx5_free(tbl);
		tbl = NULL;
	}
	return tbl;
}

void
mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	uint32_t i, j;

	if (!tbl)
		return;
	g_tbl = tbl->tbl;
	if (g_tbl) {
		for (i = 0; i < MLX5_L3T_GT_SIZE; i++) {
			m_tbl = g_tbl->tbl[i];
			if (!m_tbl)
				continue;
			for (j = 0; j < MLX5_L3T_MT_SIZE; j++) {
				if (!m_tbl->tbl[j])
					continue;
				MLX5_ASSERT(!((struct mlx5_l3t_entry_word *)
					      m_tbl->tbl[j])->ref_cnt);
				mlx5_ipool_free(tbl->eip,
						((struct mlx5_l3t_entry_word *)
						 m_tbl->tbl[j])->idx);
				m_tbl->tbl[j] = 0;
				if (!(--m_tbl->ref_cnt))
					break;
			}
			MLX5_ASSERT(!m_tbl->ref_cnt);
			mlx5_free(g_tbl->tbl[i]);
			g_tbl->tbl[i] = 0;
			if (!(--g_tbl->ref_cnt))
				break;
		}
		MLX5_ASSERT(!g_tbl->ref_cnt);
		mlx5_free(tbl->tbl);
		tbl->tbl = 0;
	}
	mlx5_ipool_destroy(tbl->eip);
	mlx5_free(tbl);
}

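/*
 * Illustrative L3 table usage sketch ("tbl", "idx" and "val" are
 * hypothetical caller-side names):
 *
 *	union mlx5_l3t_data val = { .dword = 0x1234 };
 *	struct mlx5_l3t_tbl *tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
 *
 *	mlx5_l3t_set_entry(tbl, idx, &val);	// stores val, ref_cnt = 1
 *	mlx5_l3t_get_entry(tbl, idx, &val);	// reads val, takes a reference
 *	mlx5_l3t_clear_entry(tbl, idx);		// drops one reference
 *	mlx5_l3t_clear_entry(tbl, idx);		// last reference, entry freed
 *	mlx5_l3t_destroy(tbl);
 */
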
static int32_t
__l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;

	g_tbl = tbl->tbl;
	if (!g_tbl)
		return -1;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		return -1;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		return -1;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		data->word = w_e_tbl->entry[entry_idx].data;
		if (w_e_tbl->entry[entry_idx].data)
			w_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		data->dword = dw_e_tbl->entry[entry_idx].data;
		if (dw_e_tbl->entry[entry_idx].data)
			dw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		data->qword = qw_e_tbl->entry[entry_idx].data;
		if (qw_e_tbl->entry[entry_idx].data)
			qw_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		data->ptr = ptr_e_tbl->entry[entry_idx].data;
		if (ptr_e_tbl->entry[entry_idx].data)
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
		break;
	}
	return 0;
}

int32_t
mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_get_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

int32_t
mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx;
	uint64_t ref_cnt;
	int32_t ret = -1;

	rte_spinlock_lock(&tbl->sl);
	g_tbl = tbl->tbl;
	if (!g_tbl)
		goto out;
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl)
		goto out;
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl)
		goto out;
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		MLX5_ASSERT(w_e_tbl->entry[entry_idx].ref_cnt);
		ret = --w_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		w_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --w_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		MLX5_ASSERT(dw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --dw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		dw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --dw_e_tbl->ref_cnt;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		MLX5_ASSERT(qw_e_tbl->entry[entry_idx].ref_cnt);
		ret = --qw_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		qw_e_tbl->entry[entry_idx].data = 0;
		ref_cnt = --qw_e_tbl->ref_cnt;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		MLX5_ASSERT(ptr_e_tbl->entry[entry_idx].ref_cnt);
		ret = --ptr_e_tbl->entry[entry_idx].ref_cnt;
		if (ret)
			goto out;
		ptr_e_tbl->entry[entry_idx].data = NULL;
		ref_cnt = --ptr_e_tbl->ref_cnt;
		break;
	}
	if (!ref_cnt) {
		mlx5_ipool_free(tbl->eip,
				((struct mlx5_l3t_entry_word *)e_tbl)->idx);
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									NULL;
		if (!(--m_tbl->ref_cnt)) {
			mlx5_free(m_tbl);
			g_tbl->tbl
			[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] = NULL;
			if (!(--g_tbl->ref_cnt)) {
				mlx5_free(g_tbl);
				tbl->tbl = 0;
			}
		}
	}
out:
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}

static int32_t
__l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		union mlx5_l3t_data *data)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;
	void *e_tbl;
	uint32_t entry_idx, tbl_idx = 0;

	/* Check the global table, create it if empty. */
	g_tbl = tbl->tbl;
	if (!g_tbl) {
		g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!g_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		tbl->tbl = g_tbl;
	}
	/*
	 * Check the middle table, create it if empty. Ref_cnt will be
	 * increased if new sub table created.
	 */
	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
	if (!m_tbl) {
		m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_l3t_level_tbl) +
				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
				    SOCKET_ID_ANY);
		if (!m_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] =
									m_tbl;
		g_tbl->ref_cnt++;
	}
	/*
	 * Check the entry table, create it if empty. Ref_cnt will be
	 * increased if new sub entry table created.
	 */
	e_tbl = m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK];
	if (!e_tbl) {
		e_tbl = mlx5_ipool_zmalloc(tbl->eip, &tbl_idx);
		if (!e_tbl) {
			rte_errno = ENOMEM;
			return -1;
		}
		((struct mlx5_l3t_entry_word *)e_tbl)->idx = tbl_idx;
		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
									e_tbl;
		m_tbl->ref_cnt++;
	}
	entry_idx = idx & MLX5_L3T_ET_MASK;
	switch (tbl->type) {
	case MLX5_L3T_TYPE_WORD:
		w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
		if (w_e_tbl->entry[entry_idx].data) {
			data->word = w_e_tbl->entry[entry_idx].data;
			w_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		w_e_tbl->entry[entry_idx].data = data->word;
		w_e_tbl->entry[entry_idx].ref_cnt = 1;
		w_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_DWORD:
		dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
		if (dw_e_tbl->entry[entry_idx].data) {
			data->dword = dw_e_tbl->entry[entry_idx].data;
			dw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		dw_e_tbl->entry[entry_idx].data = data->dword;
		dw_e_tbl->entry[entry_idx].ref_cnt = 1;
		dw_e_tbl->ref_cnt++;
		break;
	case MLX5_L3T_TYPE_QWORD:
		qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
		if (qw_e_tbl->entry[entry_idx].data) {
			data->qword = qw_e_tbl->entry[entry_idx].data;
			qw_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		qw_e_tbl->entry[entry_idx].data = data->qword;
		qw_e_tbl->entry[entry_idx].ref_cnt = 1;
		qw_e_tbl->ref_cnt++;
		break;
	default:
		ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
		if (ptr_e_tbl->entry[entry_idx].data) {
			data->ptr = ptr_e_tbl->entry[entry_idx].data;
			ptr_e_tbl->entry[entry_idx].ref_cnt++;
			rte_errno = EEXIST;
			return -1;
		}
		ptr_e_tbl->entry[entry_idx].data = data->ptr;
		ptr_e_tbl->entry[entry_idx].ref_cnt = 1;
		ptr_e_tbl->ref_cnt++;
		break;
	}
	return 0;
}

int32_t
mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
		   union mlx5_l3t_data *data)
{
	int ret;

	rte_spinlock_lock(&tbl->sl);
	ret = __l3t_set_entry(tbl, idx, data);
	rte_spinlock_unlock(&tbl->sl);
	return ret;
}