/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5_glue.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
#include "mlx5_common_utils.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

/**
 * Expand B-tree table to a given size. Must not be called while holding
 * memory_hotplug_lock or share_cache.rwlock because rte_realloc() is used.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * The downside of directly using rte_realloc() is that SOCKET_ID_ANY
	 * is used internally if there is no room to expand in place. This is
	 * acceptable because it is a rare case on a very slow path. The
	 * bottom-half table cache_bh[] is initially given enough space in
	 * practice, so once it is expanded, further expansion should not be
	 * needed again.
	 */
	mem = rte_realloc(bt->table, n * sizeof(struct mr_cache_entry), 0);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up an LKey in the given B-tree lookup table, store the last searched
 * index and return the LKey found.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, this returns the index where
 *   the search stopped so that it can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	MLX5_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
				    lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	MLX5_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}
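/*
 * Illustration only (not part of the driver): with the zeroed sentinel at
 * index 0 and two registered ranges, the lookup table could look like:
 *
 *	idx  start      end        lkey
 *	 0   0x0        0x0        UINT32_MAX  (sentinel)
 *	 1   0x100000   0x300000   0x1234
 *	 2   0x500000   0x600000   0x5678
 *
 * mr_btree_lookup(bt, &idx, 0x200000) returns 0x1234 with idx == 1, while
 * 0x400000 lies between the two entries and returns UINT32_MAX, also with
 * idx == 1, the slot right before where mr_btree_insert() would place a
 * new entry.
 */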
/**
 * Insert an entry into the B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	MLX5_ASSERT(bt != NULL);
	MLX5_ASSERT(bt->len <= bt->size);
	MLX5_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	MLX5_ASSERT(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = rte_calloc_socket("B-tree table",
				      n, sizeof(struct mr_cache_entry),
				      0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DEBUG("failed to allocate memory for btree cache on socket %d",
		      socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mr_cache_entry) {
		.lkey = UINT32_MAX,
	};
	DEBUG("initialized B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	return 0;
}

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DEBUG("freeing B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	rte_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	int idx;
	struct mr_cache_entry *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mr_cache_entry *entry = &lkp_tbl[idx];

		DEBUG("B-tree(%p)[%u],"
		      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}
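/*
 * Usage sketch of the B-tree cache helpers above (illustrative only; base,
 * len, off and lkey_be are caller-owned placeholders, and the table size is
 * just an example):
 *
 *	struct mlx5_mr_btree bt = { 0 };
 *	struct mr_cache_entry ent;
 *	uint16_t idx;
 *	uint32_t lkey;
 *
 *	if (mlx5_mr_btree_init(&bt, MLX5_MR_BTREE_CACHE_N, SOCKET_ID_ANY) < 0)
 *		return -rte_errno;
 *	ent = (struct mr_cache_entry){
 *		.start = base, .end = base + len, .lkey = lkey_be,
 *	};
 *	(void)mr_btree_insert(&bt, &ent);
 *	lkey = mr_btree_lookup(&bt, &idx, base + off);
 *	mlx5_mr_btree_dump(&bt);
 *	mlx5_mr_btree_free(&bt);
 */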
/**
 * Find a virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have memseg list. */
	if (mr->msl == NULL) {
		MLX5_ASSERT(mr->ms_bmp_n == 1);
		MLX5_ASSERT(mr->ms_n == 1);
		MLX5_ASSERT(base_idx == 0);
		/*
		 * Can't search it from memseg list but get it directly from
		 * pmd_mr as there's only one chunk.
		 */
		entry->start = (uintptr_t)mr->pmd_mr.addr;
		entry->end = (uintptr_t)mr->pmd_mr.addr + mr->pmd_mr.len;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
	}
	return idx;
}

/**
 * Insert an MR into the global B-tree cache. It may fail when the B-tree
 * table overflows, since the global table cannot be expanded (to avoid a
 * deadlock). In that case the entry will have to be found again by
 * mlx5_mr_lookup_list() in mlx5_mr_create() on a cache miss.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr)
{
	unsigned int n;

	DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
		(void *)mr, (void *)share_cache);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be expanded
			 * because of deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up address in the original global MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}
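/*
 * Note on the chunk-walk idiom used above (descriptive only, 'mr' assumed
 * valid): callers of mr_find_next_chunk() zero a scratch entry, pass the
 * running bitmap index and continue from the returned index; an entry whose
 * end is still 0 means the MR has no further chunk.
 *
 *	struct mr_cache_entry chunk;
 *	unsigned int n = 0;
 *
 *	while (n < mr->ms_bmp_n) {
 *		memset(&chunk, 0, sizeof(chunk));
 *		n = mr_find_next_chunk(mr, &chunk, n);
 *		if (chunk.end == 0)
 *			break;
 *		consume [chunk.start, chunk.end) with chunk.lkey here
 *	}
 */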
/**
 * Look up an address in the global MR cache.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr)
{
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed because it failed to expand the
	 * B-tree table, it cannot hold all the existing MRs. In that case the
	 * address has to be searched by traversing the original MR list
	 * instead, which is a very slow path. Otherwise, the global cache is
	 * all inclusive.
	 */
	if (!unlikely(share_cache->cache.overflow)) {
		lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*share_cache->cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mlx5_mr_lookup_list(share_cache, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}

/**
 * Free MR resources. The MR lock must not be held, in order to avoid a
 * deadlock: rte_free() can raise a memory free event and the callback
 * function would spin on the lock.
 *
 * @param mr
 *   Pointer to MR to free.
 */
static void
mr_free(struct mlx5_mr *mr)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	if (mr->pmd_mr.obj != NULL)
		claim_zero(mlx5_glue->dereg_mr(mr->pmd_mr.obj));
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	rte_free(mr);
}

void
mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
	/* Flush cache to rebuild. */
	share_cache->cache.len = 1;
	share_cache->cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr)
		if (mlx5_mr_insert_cache(share_cache, mr) < 0)
			return;
}

/**
 * Release resources of detached MRs that no longer have any online entry.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
static void
mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * An MR can't be freed while holding the lock because rte_free()
	 * could invoke the memory free callback, which would deadlock.
	 */
	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = share_cache->mr_free_list;
	LIST_INIT(&share_cache->mr_free_list);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mr_free(mr);
	}
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}
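/*
 * Descriptive note on the callback above: rte_memseg_contig_walk() calls it
 * once per virtually contiguous run of memsegs, with 'len' being the run
 * length in bytes. A caller only fills in the lookup address and checks the
 * walk result, for example (sketch only):
 *
 *	struct mr_find_contig_memsegs_data data = { .addr = addr, };
 *
 *	if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data))
 *		fail, no contiguous run contains addr
 *	register [data.start, data.end) which belongs to data.msl
 */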
/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called from a secondary process; it sends a request to
 * the primary process, which creates an MR for the address. As the global MR
 * list resides in shared memory, the following LKey lookup should succeed
 * unless the request fails.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier used to request MR creation from the primary
 *   process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If creation fails, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag to extend MR registration to the whole contiguous
 *   chunk (unused in the secondary process).
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_secondary(void *pd __rte_unused,
			 struct mlx5_mp_id *mp_id,
			 struct mlx5_mr_share_cache *share_cache,
			 struct mr_cache_entry *entry, uintptr_t addr,
			 unsigned int mr_ext_memseg_en __rte_unused)
{
	int ret;

	DEBUG("port %u requesting MR creation for address (%p)",
	      mp_id->port_id, (void *)addr);
	ret = mlx5_mp_req_mr_create(mp_id, addr);
	if (ret) {
		DEBUG("Failed to request MR creation for address (%p)",
		      (void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	DEBUG("MR CREATED by primary process for %p:\n"
	      " [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
	      (void *)addr, entry->start, entry->end, entry->lkey);
	return entry->lkey;
}
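/*
 * Descriptive note on the secondary-process path above: the request issued
 * by mlx5_mp_req_mr_create() is synchronous, so by the time it returns
 * successfully the primary process has registered the address and inserted
 * the new MR into the shared cache, which is why the subsequent lookup is
 * asserted never to fail.
 */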
/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register the entire virtually contiguous memory chunk around the address.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If creation fails, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag to extend MR registration to the whole virtually
 *   contiguous chunk instead of a single page.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en)
{
	struct mr_find_contig_memsegs_data data = {.addr = addr, };
	struct mr_find_contig_memsegs_data data_re;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	int ms_idx_shift = -1;
	uint32_t bmp_size;
	void *bmp_mem;
	uint32_t ms_n;
	uint32_t n;
	size_t len;
	struct ibv_mr *ibv_mr;

	DRV_LOG(DEBUG, "Creating a MR using address (%p)", (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called while holding
	 * either memory_hotplug_lock or share_cache->rwlock. MRs on the free
	 * list were detached by the memory free event but could not be
	 * released inside the callback due to the deadlock risk, so releasing
	 * resources here is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(share_cache);
	/*
	 * If enabled, find the contiguous virtual address chunk in use to
	 * which the given address belongs, in order to register the maximum
	 * range. In the best case, where mempools are not dynamically
	 * recreated and '--socket-mem' is specified as an EAL option, it is
	 * very likely to have only one MR (LKey) per socket and per hugepage
	 * size even though the system memory is highly fragmented. As the
	 * whole memory chunk will be pinned by the kernel, it can't be reused
	 * unless the entire chunk is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may degrade performance if
	 * there are many MRs to look up on the datapath.
	 */
	if (!mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"Unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX5_ASSERT(data.msl);
	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX5_ASSERT(ms);
	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DEBUG("Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " page_sz=0x%" PRIx64 ", ms_n=%u",
	      (void *)addr, data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE) +
				bmp_size,
				RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DEBUG("Unable to allocate memory for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize memseg bitmap. To
	 * see if a memseg of ms_idx in the memseg-list is still valid, check:
	 *	rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DEBUG("Unable to initialize bitmap for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Recheck whether the extended contiguous chunk is still valid.
	 * Because memory_hotplug_lock can't be held across memory related
	 * calls in a critical path, the resource allocation above couldn't be
	 * done under the lock. If the memory layout has changed at this
	 * point, try again with just a single page. If not, go on with the
	 * big chunk atomically from here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DEBUG("Unable to find virtually contiguous"
		      " chunk for address (%p)."
		      " rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mr_free(mr);
		goto alloc_resources;
	}
	MLX5_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&share_cache->rwlock);
	/*
	 * Check whether the address is really missing. If another thread has
	 * already created an MR for it, or it was present but missed earlier
	 * because of the cache overflow, abort and return.
	 */
	if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail if the table
		 * has overflowed; then this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&share_cache->cache, entry);
		DEBUG("Found MR for %p on final lookup, abort", (void *)addr);
		rte_rwlock_write_unlock(&share_cache->rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mr_free(mr);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for verbs MR. Set bits for registering
	 * memsegs but exclude already registered ones. Bitmap can be
	 * fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mr_cache_entry ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
		    UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
	 * called while holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	ibv_mr = mlx5_glue->reg_mr(pd, (void *)data.start, len,
				   IBV_ACCESS_LOCAL_WRITE |
				   (haswell_broadwell_cpu ? 0 :
				    IBV_ACCESS_RELAXED_ORDERING));
	if (ibv_mr == NULL) {
		DEBUG("Failed to create an MR for address (%p)",
		      (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	mr->pmd_mr.lkey = ibv_mr->lkey;
	mr->pmd_mr.addr = ibv_mr->addr;
	mr->pmd_mr.len = ibv_mr->length;
	mr->pmd_mr.obj = ibv_mr;
	MLX5_ASSERT((uintptr_t)mr->pmd_mr.addr == data.start);
	MLX5_ASSERT(mr->pmd_mr.len);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	DEBUG("MR CREATED (%p) for %p:\n"
	      " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      (void *)mr, (void *)addr, data.start, data.end,
	      rte_cpu_to_be_32(mr->pmd_mr.lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&share_cache->rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, since this can be called on a datapath, only a
	 * warning message is printed per error. Must be unlocked before
	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be
	 * called inside.
	 */
	mr_free(mr);
	return UINT32_MAX;
}
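/*
 * Descriptive summary of mlx5_mr_create_primary() above (no new behavior):
 * 1. Opportunistically garbage-collect MRs that were detached by the memory
 *    free callback.
 * 2. Find the widest virtually contiguous chunk around the address, or a
 *    single page when mr_ext_memseg_en is disabled, without any lock held.
 * 3. Take rte_mcfg_mem_read_lock(), re-validate the chunk, then take
 *    share_cache->rwlock for writing before touching the cache.
 * 4. Register the not-yet-registered part of the chunk with ibv_reg_mr()
 *    and insert the resulting chunks into the global B-tree cache.
 * The locks are taken in that order and released in reverse before
 * returning.
 */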
/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from both the primary and a secondary process.
 *
 * @param pd
 *   Pointer to pd handle of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, used when the request has to be forwarded to
 *   the primary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If creation fails, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag to extend MR registration to the whole virtually
 *   contiguous chunk instead of a single page.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create(void *pd, struct mlx5_mp_id *mp_id,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr,
	       unsigned int mr_ext_memseg_en)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_mr_create_primary(pd, share_cache, entry,
					     addr, mr_ext_memseg_en);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
					       addr, mr_ext_memseg_en);
		break;
	default:
		break;
	}
	return ret;
}

/**
 * Look up an address in the global MR cache table. If not found, create a
 * new MR. Insert the found/created entry into the local bottom-half cache
 * table.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, used when the request has to be forwarded to
 *   the primary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If creation fails, this is not written.
 * @param addr
 *   Search key.
 * @param mr_ext_memseg_en
 *   Configurable flag to extend MR registration to the whole virtually
 *   contiguous chunk instead of a single page.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
		 struct mlx5_mr_share_cache *share_cache,
		 struct mlx5_mr_ctrl *mr_ctrl,
		 struct mr_cache_entry *entry, uintptr_t addr,
		 unsigned int mr_ext_memseg_en)
{
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint32_t lkey;
	uint16_t idx;

	/* If the local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*share_cache->cache.table)[idx];
		rte_rwlock_read_unlock(&share_cache->rwlock);
		/*
		 * Update the local cache. Even if it fails, return the found
		 * entry to update the top-half cache. Next time, this entry
		 * will be found in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&share_cache->rwlock);
	/* First time seeing the address? Create a new MR. */
	lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
			      mr_ext_memseg_en);
	/*
	 * Update the local cache if a new global MR was successfully created.
	 * Even if the creation failed, there is no action to take in this
	 * datapath code: the returned LKey is invalid and the HW will
	 * eventually fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}
/**
 * Bottom half of the LKey search on the datapath. First search in the
 * cache_bh[] B-tree; on a miss, search the global MR cache table and
 * propagate the new entry to the per-queue local caches.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, used when the request has to be forwarded to
 *   the primary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 * @param mr_ext_memseg_en
 *   Configurable flag to extend MR registration to the whole virtually
 *   contiguous chunk instead of a single page.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in the local lookup table, search in the global
		 * cache; the local cache_bh[] will be updated inside if
		 * possible. The top-half cache entry will also be updated.
		 */
		lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
					repl, addr, mr_ext_memseg_en);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}
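/*
 * Usage sketch (illustrative only, outside of this file): a PMD datapath
 * normally keeps a tiny linear top-half cache in front of this bottom half
 * and calls it only on a miss. For a Tx mbuf it could look like this, where
 * pd, mp_id, share_cache, mr_ctrl and mr_ext_memseg_en are placeholders for
 * whatever the calling device context provides:
 *
 *	uintptr_t addr = rte_pktmbuf_mtod(mbuf, uintptr_t);
 *	uint32_t lkey = mlx5_mr_addr2mr_bh(pd, mp_id, share_cache, mr_ctrl,
 *					   addr, mr_ext_memseg_en);
 *
 *	if (lkey == UINT32_MAX)
 *		drop the packet, the HW would fault on this address
 */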
/**
 * Release all the created MRs and resources of the global MR cache of a
 * device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;

	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach from MR list and move to free list. */
	mr_next = LIST_FIRST(&share_cache->mr_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
	}
	LIST_INIT(&share_cache->mr_list);
	/* Free global cache. */
	mlx5_mr_btree_free(&share_cache->cache);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(share_cache);
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR local cache.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}

/**
 * Create a memory region for external memory, that is, memory which is not
 * part of the DPDK memory segments.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param addr
 *   Starting virtual address of memory.
 * @param len
 *   Length of memory segment being mapped.
 * @param socket_id
 *   Socket to allocate heap memory for the control structures.
 *
 * @return
 *   Pointer to MR structure on success, NULL otherwise.
 */
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id)
{
	struct ibv_mr *ibv_mr;
	struct mlx5_mr *mr = NULL;

	mr = rte_zmalloc_socket(NULL,
				RTE_ALIGN_CEIL(sizeof(*mr),
					       RTE_CACHE_LINE_SIZE),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (mr == NULL)
		return NULL;
	ibv_mr = mlx5_glue->reg_mr(pd, (void *)addr, len,
				   IBV_ACCESS_LOCAL_WRITE |
				   (haswell_broadwell_cpu ? 0 :
				    IBV_ACCESS_RELAXED_ORDERING));
	if (ibv_mr == NULL) {
		DRV_LOG(WARNING,
			"Failed to create MR for address (%p)",
			(void *)addr);
		rte_free(mr);
		return NULL;
	}
	mr->pmd_mr.lkey = ibv_mr->lkey;
	mr->pmd_mr.addr = ibv_mr->addr;
	mr->pmd_mr.len = ibv_mr->length;
	mr->pmd_mr.obj = ibv_mr;
	mr->msl = NULL; /* Mark it as external memory. */
	mr->ms_bmp = NULL;
	mr->ms_n = 1;
	mr->ms_bmp_n = 1;
	DRV_LOG(DEBUG,
		"MR CREATED (%p) for external memory %p:\n"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		(void *)mr, (void *)addr,
		addr, addr + len, rte_cpu_to_be_32(mr->pmd_mr.lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	return mr;
}
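/*
 * Usage sketch (illustrative only, modeled on how the mlx5 PMDs consume this
 * helper): registering an externally allocated buffer and publishing the MR
 * through the shared cache. 'pd', 'share_cache', 'buf' and 'size' are
 * caller-owned placeholders.
 *
 *	struct mlx5_mr *mr;
 *
 *	mr = mlx5_create_mr_ext(pd, (uintptr_t)buf, size, SOCKET_ID_ANY);
 *	if (mr == NULL)
 *		handle the registration failure
 *	rte_rwlock_write_lock(&share_cache->rwlock);
 *	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
 *	mlx5_mr_insert_cache(share_cache, mr);
 *	rte_rwlock_write_unlock(&share_cache->rwlock);
 */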
/**
 * Dump all the created MRs and the global cache entries.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	struct mlx5_mr *mr;
	int mr_n = 0;
	int chunk_n = 0;

	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		DEBUG("MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
		      mr_n++, rte_cpu_to_be_32(mr->pmd_mr.lkey),
		      mr->ms_n, mr->ms_bmp_n);
		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret = { 0, };

			n = mr_find_next_chunk(mr, &ret, n);
			if (!ret.end)
				break;
			DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
			      chunk_n++, ret.start, ret.end);
		}
	}
	DEBUG("Dumping global cache %p", (void *)share_cache);
	mlx5_mr_btree_dump(&share_cache->cache);
	rte_rwlock_read_unlock(&share_cache->rwlock);
#endif
}