/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5_glue.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
#include "mlx5_common_utils.h"
#include "mlx5_malloc.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

/**
 * Expand B-tree table to a given size. Can't be called while holding
 * memory_hotplug_lock or share_cache.rwlock due to rte_realloc().
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is
	 * used inside if there's no room to expand. Because this is a rare
	 * case and part of a very slow path, it is acceptable.
	 * Initially cache_bh[] will be given practically enough space and once
	 * it is expanded, expansion shouldn't be needed again ever.
	 */
	mem = mlx5_realloc(bt->table, MLX5_MEM_RTE | MLX5_MEM_ZERO,
			   n * sizeof(struct mr_cache_entry), 0, SOCKET_ID_ANY);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up LKey from given B-tree lookup table, store the last index and return
 * searched LKey.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, returns index where it stops
 *   searching so that index can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	MLX5_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
				    lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	MLX5_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}

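/*
 * Illustration only (not used by the driver): entries are kept sorted by
 * start address, with the sentinel created by mlx5_mr_btree_init() always
 * at index 0. For a hypothetical table
 *
 *   idx 0: [0x0,    0x0)    lkey=UINT32_MAX  (sentinel)
 *   idx 1: [0x1000, 0x3000) lkey=0x11
 *   idx 2: [0x5000, 0x6000) lkey=0x22
 *
 * mr_btree_lookup(bt, &idx, 0x2000) returns 0x11 with idx = 1, while
 * mr_btree_lookup(bt, &idx, 0x4000) returns UINT32_MAX with idx = 1, the
 * slot after which mr_btree_insert() would place an entry covering 0x4000.
 */
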
/**
 * Insert an entry to B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	MLX5_ASSERT(bt != NULL);
	MLX5_ASSERT(bt->len <= bt->size);
	MLX5_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	MLX5_ASSERT(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				sizeof(struct mr_cache_entry) * n,
				0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DRV_LOG(DEBUG,
			"failed to allocate memory for btree cache on socket "
			"%d", socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mr_cache_entry) {
		.lkey = UINT32_MAX,
	};
	DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
		(void *)bt, (void *)bt->table);
	return 0;
}

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
		(void *)bt, (void *)bt->table);
	mlx5_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	int idx;
	struct mr_cache_entry *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mr_cache_entry *entry = &lkp_tbl[idx];

		DRV_LOG(DEBUG, "B-tree(%p)[%u],"
			" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}

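/*
 * Condensed usage sketch of the B-tree helpers above; error handling is
 * trimmed and the initial size of 256 is only an example, not a driver
 * constant. The real callers are the cache routines later in this file.
 *
 *   struct mlx5_mr_btree bt;
 *   struct mr_cache_entry entry = { .start = ..., .end = ..., .lkey = ... };
 *   uint16_t idx;
 *   uint32_t lkey;
 *
 *   mlx5_mr_btree_init(&bt, 256, SOCKET_ID_ANY);
 *   mr_btree_insert(&bt, &entry);
 *   lkey = mr_btree_lookup(&bt, &idx, addr);
 *   mlx5_mr_btree_free(&bt);
 */
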
/**
 * Find virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have memseg list. */
	if (mr->msl == NULL) {
		MLX5_ASSERT(mr->ms_bmp_n == 1);
		MLX5_ASSERT(mr->ms_n == 1);
		MLX5_ASSERT(base_idx == 0);
		/*
		 * Can't search it from memseg list but get it directly from
		 * pmd_mr as there's only one chunk.
		 */
		entry->start = (uintptr_t)mr->pmd_mr.addr;
		entry->end = (uintptr_t)mr->pmd_mr.addr + mr->pmd_mr.len;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
	}
	return idx;
}

/**
 * Insert a MR to the global B-tree cache. It may fail due to low-on-memory.
 * Then, this entry will have to be searched by mlx5_mr_lookup_list() in
 * mlx5_mr_create() on miss.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr)
{
	unsigned int n;

	DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
		(void *)mr, (void *)share_cache);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * because of deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

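/*
 * Chunks of an MR are iterated with the idiom below (see
 * mlx5_mr_insert_cache() above and mlx5_mr_dump_cache() at the end of this
 * file; mlx5_mr_lookup_list() uses a variant without the early break).
 * Condensed sketch for illustration only:
 *
 *   for (n = 0; n < mr->ms_bmp_n; ) {
 *           struct mr_cache_entry chunk;
 *
 *           memset(&chunk, 0, sizeof(chunk));
 *           n = mr_find_next_chunk(mr, &chunk, n);
 *           if (!chunk.end)
 *                   break;  (no more contiguous chunks in this MR)
 *           ... use chunk.start, chunk.end and chunk.lkey ...
 *   }
 */
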
/**
 * Look up address on the global MR cache.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr)
{
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed since it failed to expand the
	 * B-tree table, it can't have all the existing MRs. Then, the address
	 * has to be searched by traversing the original MR list instead, which
	 * is very slow path. Otherwise, the global cache is all inclusive.
	 */
	if (!unlikely(share_cache->cache.overflow)) {
		lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*share_cache->cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mlx5_mr_lookup_list(share_cache, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}

/**
 * Free MR resources. MR lock must not be held to avoid a deadlock. rte_free()
 * can raise a memory free event and the callback function will spin on the
 * lock.
 *
 * @param mr
 *   Pointer to MR to free.
 * @param dereg_mr_cb
 *   Callback deregistering the MR from the hardware.
 */
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	dereg_mr_cb(&mr->pmd_mr);
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	mlx5_free(mr);
}

/**
 * Rebuild the global B-tree cache from the original MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
	/* Flush cache to rebuild. */
	share_cache->cache.len = 1;
	share_cache->cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr)
		if (mlx5_mr_insert_cache(share_cache, mr) < 0)
			return;
}

/**
 * Release resources of detached MRs having no online entry.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
static void
mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * MRs can't be freed while holding the lock because rte_free() could
	 * call the memory free callback function. This would be a deadlock
	 * situation.
	 */
	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = share_cache->mr_free_list;
	LIST_INIT(&share_cache->mr_free_list);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	}
}

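/*
 * Locking notes, condensed here for convenience from the comments spread
 * across this file (see the individual functions for details):
 * mr_btree_expand() calls mlx5_realloc() and therefore must not run while
 * memory_hotplug_lock or share_cache->rwlock is held; MRs detached by the
 * memory free event are only queued on mr_free_list and released later by
 * mlx5_mr_garbage_collect() after the rwlock is dropped, because rte_free()
 * can re-enter the memory free callback; mlx5_mr_create_primary() takes
 * rte_mcfg_mem_read_lock() before share_cache->rwlock when registering a
 * new chunk.
 */
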
/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called on a secondary process, then a request is sent to
 * the primary process in order to create an MR for the address. As the global
 * MR list is in shared memory, the following LKey lookup should succeed unless
 * the request fails.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, used to request MR creation from the primary
 *   process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_secondary(void *pd __rte_unused,
			 struct mlx5_mp_id *mp_id,
			 struct mlx5_mr_share_cache *share_cache,
			 struct mr_cache_entry *entry, uintptr_t addr,
			 unsigned int mr_ext_memseg_en __rte_unused)
{
	int ret;

	DRV_LOG(DEBUG, "port %u requesting MR creation for address (%p)",
		mp_id->port_id, (void *)addr);
	ret = mlx5_mp_req_mr_create(mp_id, addr);
	if (ret) {
		DRV_LOG(DEBUG, "Failed to request MR creation for address (%p)",
			(void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	DRV_LOG(DEBUG, "MR CREATED by primary process for %p:\n"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
		(void *)addr, entry->start, entry->end, entry->lkey);
	return entry->lkey;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register entire virtually contiguous memory chunk around the address.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en)
{
	struct mr_find_contig_memsegs_data data = {.addr = addr, };
	struct mr_find_contig_memsegs_data data_re;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	int ms_idx_shift = -1;
	uint32_t bmp_size;
	void *bmp_mem;
	uint32_t ms_n;
	uint32_t n;
	size_t len;

	DRV_LOG(DEBUG, "Creating a MR using address (%p)", (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called while holding
	 * either memory_hotplug_lock or share_cache->rwlock. MRs on the free
	 * list have been detached by the memory free event but couldn't be
	 * released inside the callback due to deadlock. As a result, releasing
	 * resources is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(share_cache);
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register the maximum
	 * range. In the best case where mempools are not dynamically recreated
	 * and '--socket-mem' is specified as an EAL option, it is very likely
	 * to have only one MR(LKey) per socket and per hugepage size even
	 * though the system memory is highly fragmented. As the whole memory
	 * chunk will be pinned by the kernel, it can't be reused unless the
	 * entire chunk is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may drop performance if there
	 * are many MRs to lookup on the datapath.
	 */
	if (!mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"Unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX5_ASSERT(data.msl);
	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX5_ASSERT(ms);
	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DRV_LOG(DEBUG, "Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" page_sz=0x%" PRIx64 ", ms_n=%u",
		(void *)addr, data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE) +
			 bmp_size, RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DRV_LOG(DEBUG, "Unable to allocate memory for a new MR of"
			" address (%p).", (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize the memseg bitmap.
	 * To see if a memseg of ms_idx in the memseg-list is still valid,
	 * check: rte_bitmap_get(mr->ms_bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DRV_LOG(DEBUG, "Unable to initialize bitmap for a new MR of"
			" address (%p).", (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Should recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held if there are any memory
	 * related calls in a critical path, resource allocation above can't be
	 * locked. If the memory has been changed at this point, try again with
	 * just a single page. If not, go on with the big chunk atomically from
	 * here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DRV_LOG(DEBUG,
			"Unable to find virtually contiguous chunk for address "
			"(%p). rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with a single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		goto alloc_resources;
	}
	MLX5_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&share_cache->rwlock);
	/*
	 * Check that the address is really missing. If another thread already
	 * created one or it is not found due to overflow, abort and return.
	 */
	if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to
		 * low-on-memory. Then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&share_cache->cache, entry);
		DRV_LOG(DEBUG, "Found MR for %p on final lookup, abort",
			(void *)addr);
		rte_rwlock_write_unlock(&share_cache->rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mr_cache_entry ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
		    UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create an MR for the memory chunk. Verbs: ibv_reg_mr() can
	 * be called while holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(DEBUG, "Failed to create an MR for address (%p)",
			(void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	MLX5_ASSERT((uintptr_t)mr->pmd_mr.addr == data.start);
	MLX5_ASSERT(mr->pmd_mr.len);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	DRV_LOG(DEBUG, "MR CREATED (%p) for %p:\n"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		(void *)mr, (void *)addr, data.start, data.end,
		rte_cpu_to_be_32(mr->pmd_mr.lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&share_cache->rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, as this can be called from a datapath, a warning
	 * message per error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
	 * inside.
	 */
	mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	return UINT32_MAX;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from primary and secondary processes.
 *
 * @param pd
 *   Pointer to pd handle of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, used only when called from a secondary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create(void *pd, struct mlx5_mp_id *mp_id,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr,
	       unsigned int mr_ext_memseg_en)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_mr_create_primary(pd, share_cache, entry,
					     addr, mr_ext_memseg_en);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
					       addr, mr_ext_memseg_en);
		break;
	default:
		break;
	}
	return ret;
}

/**
 * Look up address in the global MR cache table. If not found, create a new MR.
 * Insert the found/created entry to local bottom-half cache table.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, used only when called from a secondary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
		 struct mlx5_mr_share_cache *share_cache,
		 struct mlx5_mr_ctrl *mr_ctrl,
		 struct mr_cache_entry *entry, uintptr_t addr,
		 unsigned int mr_ext_memseg_en)
{
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint32_t lkey;
	uint16_t idx;

	/* If local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*share_cache->cache.table)[idx];
		rte_rwlock_read_unlock(&share_cache->rwlock);
		/*
		 * Update local cache. Even if it fails, return the found entry
		 * to update top-half cache. Next time, this entry will be found
		 * in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&share_cache->rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
			      mr_ext_memseg_en);
	/*
	 * Update the local cache if successfully created a new global MR. Even
	 * if failed to create one, there's no action to take in this datapath
	 * code. As returning LKey is invalid, this will eventually make HW
	 * fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on datapath. First search in cache_bh[] and if
 * it misses, search in the global MR cache table and update the new entry to
 * per-queue local caches.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, used only when called from a secondary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 * @param mr_ext_memseg_en
 *   Configurable flag about external memory segment enable or not.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in the local lookup table, search in the global
		 * cache and local cache_bh[] will be updated inside if
		 * possible. Top-half cache entry will also be updated.
		 */
		lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
					repl, addr, mr_ext_memseg_en);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}

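/*
 * Illustration of the top-half (linear) cache handling above: mr_ctrl->cache[]
 * is a small ring of MLX5_MR_CACHE_N recently resolved chunks. On a
 * bottom-half hit, or after a new MR is created, the entry at mr_ctrl->head
 * (the oldest victim) is overwritten, head advances modulo MLX5_MR_CACHE_N
 * and mru is left pointing at the entry just written, so the address-to-LKey
 * fast path (implemented outside this file) can probe the MRU entry first
 * before scanning the array linearly.
 */
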
/**
 * Release all the created MRs and resources of the global MR cache of a
 * device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;

	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach from MR list and move to free list. */
	mr_next = LIST_FIRST(&share_cache->mr_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
	}
	LIST_INIT(&share_cache->mr_list);
	/* Free global cache. */
	mlx5_mr_btree_free(&share_cache->cache);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(share_cache);
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR local cache.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}

/**
 * Create a memory region for external memory, that is, memory which is not
 * part of the DPDK memory segments.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param addr
 *   Starting virtual address of memory.
 * @param len
 *   Length of memory segment being mapped.
 * @param socket_id
 *   Socket to allocate heap memory for the control structures.
 * @param reg_mr_cb
 *   Callback registering the memory with the hardware.
 *
 * @return
 *   Pointer to MR structure on success, NULL otherwise.
 */
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
		   mlx5_reg_mr_t reg_mr_cb)
{
	struct mlx5_mr *mr = NULL;

	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE),
			 RTE_CACHE_LINE_SIZE, socket_id);
	if (mr == NULL)
		return NULL;
	reg_mr_cb(pd, (void *)addr, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(WARNING,
			"Failed to create MR for address (%p)",
			(void *)addr);
		mlx5_free(mr);
		return NULL;
	}
	mr->msl = NULL; /* Mark it as external memory. */
	mr->ms_bmp = NULL;
	mr->ms_n = 1;
	mr->ms_bmp_n = 1;
	DRV_LOG(DEBUG,
		"MR CREATED (%p) for external memory %p:\n"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		(void *)mr, (void *)addr,
		addr, addr + len, rte_cpu_to_be_32(mr->pmd_mr.lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	return mr;
}

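/*
 * Hedged usage sketch for mlx5_create_mr_ext() above; "my_pd", "buf" and the
 * my_reg_mr/my_dereg_mr callbacks are hypothetical names, only meant to
 * illustrate the calling contract (real callers live in the individual PMDs):
 *
 *   struct mlx5_mr *mr;
 *
 *   mr = mlx5_create_mr_ext(my_pd, (uintptr_t)buf, buf_len,
 *                           rte_socket_id(), my_reg_mr);
 *   if (mr == NULL)
 *           ... handle registration failure ...
 *   ... the MR is typically added to the shared cache by the caller and
 *       eventually released with mlx5_mr_free(mr, my_dereg_mr) ...
 */
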
/**
 * Dump all the created MRs and the global cache entries.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	struct mlx5_mr *mr;
	int mr_n = 0;
	int chunk_n = 0;

	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		DRV_LOG(DEBUG, "MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
			mr_n++, rte_cpu_to_be_32(mr->pmd_mr.lkey),
			mr->ms_n, mr->ms_bmp_n);
		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret = { 0, };

			n = mr_find_next_chunk(mr, &ret, n);
			if (!ret.end)
				break;
			DRV_LOG(DEBUG,
				" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
				chunk_n++, ret.start, ret.end);
		}
	}
	DRV_LOG(DEBUG, "Dumping global cache %p", (void *)share_cache);
	mlx5_mr_btree_dump(&share_cache->cache);
	rte_rwlock_read_unlock(&share_cache->rwlock);
#endif
}