/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_errno.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_atomic.h>
#include <rte_fbarray.h>

#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "eal_memcfg.h"
#include "eal_private.h"
#include "malloc_elem.h"
#include "malloc_heap.h"
#include "malloc_mp.h"

/* start external socket IDs at a very high number */
#define CONST_MAX(a, b) (a > b ? a : b) /* RTE_MAX is not a constant */
#define EXTERNAL_HEAP_MIN_SOCKET_ID (CONST_MAX((1 << 8), RTE_MAX_NUMA_NODES))

static unsigned
check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
{
	unsigned check_flag = 0;

	if (!(flags & ~RTE_MEMZONE_SIZE_HINT_ONLY))
		return 1;

	switch (hugepage_sz) {
	case RTE_PGSIZE_256K:
		check_flag = RTE_MEMZONE_256KB;
		break;
	case RTE_PGSIZE_2M:
		check_flag = RTE_MEMZONE_2MB;
		break;
	case RTE_PGSIZE_16M:
		check_flag = RTE_MEMZONE_16MB;
		break;
	case RTE_PGSIZE_256M:
		check_flag = RTE_MEMZONE_256MB;
		break;
	case RTE_PGSIZE_512M:
		check_flag = RTE_MEMZONE_512MB;
		break;
	case RTE_PGSIZE_1G:
		check_flag = RTE_MEMZONE_1GB;
		break;
	case RTE_PGSIZE_4G:
		check_flag = RTE_MEMZONE_4GB;
		break;
	case RTE_PGSIZE_16G:
		check_flag = RTE_MEMZONE_16GB;
	}

	return check_flag & flags;
}

int
malloc_socket_to_heap_id(unsigned int socket_id)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i;

	for (i = 0; i < RTE_MAX_HEAPS; i++) {
		struct malloc_heap *heap = &mcfg->malloc_heaps[i];

		if (heap->socket_id == socket_id)
			return i;
	}
	return -1;
}

/*
 * Expand the heap with a memory area.
 */
static struct malloc_elem *
malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
		void *start, size_t len, bool dirty)
{
	struct malloc_elem *elem = start;

	malloc_elem_init(elem, heap, msl, len, elem, len, dirty);

	malloc_elem_insert(elem);

	elem = malloc_elem_join_adjacent_free(elem);

	malloc_elem_free_list_insert(elem);

	return elem;
}
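
/*
 * Callback for rte_memseg_contig_walk(): add a contiguous run of internal
 * memory to the heap matching the memseg list's socket ID. Externally
 * allocated memseg lists are skipped, as those are managed through the
 * external heap API.
 */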
static int
malloc_add_seg(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg __rte_unused)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *found_msl;
	struct malloc_heap *heap;
	int msl_idx, heap_idx;

	if (msl->external)
		return 0;

	heap_idx = malloc_socket_to_heap_id(msl->socket_id);
	if (heap_idx < 0) {
		RTE_LOG(ERR, EAL, "Memseg list has invalid socket id\n");
		return -1;
	}
	heap = &mcfg->malloc_heaps[heap_idx];

	/* msl is const, so find it */
	msl_idx = msl - mcfg->memsegs;

	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
		return -1;

	found_msl = &mcfg->memsegs[msl_idx];

	malloc_heap_add_memory(heap, found_msl, ms->addr, len,
			ms->flags & RTE_MEMSEG_FLAG_DIRTY);

	heap->total_size += len;

	RTE_LOG(DEBUG, EAL, "Added %zuM to heap on socket %i\n", len >> 20,
			msl->socket_id);
	return 0;
}

/*
 * Iterates through the freelist for a heap to find a free element
 * which can store data of the required size and with the requested alignment.
 * If size is 0, find the biggest available elem.
 * Returns null on failure, or pointer to element on success.
 */
static struct malloc_elem *
find_suitable_element(struct malloc_heap *heap, size_t size,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	size_t idx;
	struct malloc_elem *elem, *alt_elem = NULL;

	for (idx = malloc_elem_free_list_index(size);
			idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
				!!elem; elem = LIST_NEXT(elem, free_list)) {
			if (malloc_elem_can_hold(elem, size, align, bound,
					contig)) {
				if (check_hugepage_sz(flags,
						elem->msl->page_sz))
					return elem;
				if (alt_elem == NULL)
					alt_elem = elem;
			}
		}
	}

	if ((alt_elem != NULL) && (flags & RTE_MEMZONE_SIZE_HINT_ONLY))
		return alt_elem;

	return NULL;
}

/*
 * Iterates through the freelist for a heap to find a free element with the
 * biggest size and requested alignment. Will also set size to whatever element
 * size was found.
 * Returns null on failure, or pointer to element on success.
 */
static struct malloc_elem *
find_biggest_element(struct malloc_heap *heap, size_t *size,
		unsigned int flags, size_t align, bool contig)
{
	struct malloc_elem *elem, *max_elem = NULL;
	size_t idx, max_size = 0;

	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
				!!elem; elem = LIST_NEXT(elem, free_list)) {
			size_t cur_size;
			if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) == 0 &&
					!check_hugepage_sz(flags,
						elem->msl->page_sz))
				continue;
			if (contig) {
				cur_size =
					malloc_elem_find_max_iova_contig(elem,
							align);
			} else {
				void *data_start = RTE_PTR_ADD(elem,
						MALLOC_ELEM_HEADER_LEN);
				void *data_end = RTE_PTR_ADD(elem, elem->size -
						MALLOC_ELEM_TRAILER_LEN);
				void *aligned = RTE_PTR_ALIGN_CEIL(data_start,
						align);
				/* check if aligned data start is beyond end */
				if (aligned >= data_end)
					continue;
				cur_size = RTE_PTR_DIFF(data_end, aligned);
			}
			if (cur_size > max_size) {
				max_size = cur_size;
				max_elem = elem;
			}
		}
	}

	*size = max_size;
	return max_elem;
}

/*
 * Main function to allocate a block of memory from the heap.
 * It locks the free list, scans it, and adds a new memseg if the
 * scan fails. Once the new memseg is added, it re-scans and should return
 * the new element after releasing the lock.
 */
static void *
heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	struct malloc_elem *elem;
	size_t user_size = size;

	size = RTE_CACHE_LINE_ROUNDUP(size);
	align = RTE_CACHE_LINE_ROUNDUP(align);

	/* roundup might cause an overflow */
	if (size == 0)
		return NULL;
	elem = find_suitable_element(heap, size, flags, align, bound, contig);
	if (elem != NULL) {
		elem = malloc_elem_alloc(elem, size, align, bound, contig);

		/* increase heap's count of allocated elements */
		heap->alloc_count++;

		asan_set_redzone(elem, user_size);
	}

	return elem == NULL ? NULL : (void *)(&elem[1]);
}
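
/*
 * Like heap_alloc(), but instead of allocating a requested size, allocate
 * the biggest element that can currently be taken from the heap.
 */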
static void *
heap_alloc_biggest(struct malloc_heap *heap, const char *type __rte_unused,
		unsigned int flags, size_t align, bool contig)
{
	struct malloc_elem *elem;
	size_t size;

	align = RTE_CACHE_LINE_ROUNDUP(align);

	elem = find_biggest_element(heap, &size, flags, align, contig);
	if (elem != NULL) {
		elem = malloc_elem_alloc(elem, size, align, 0, contig);

		/* increase heap's count of allocated elements */
		heap->alloc_count++;

		asan_set_redzone(elem, size);
	}

	return elem == NULL ? NULL : (void *)(&elem[1]);
}

/* this function is exposed in malloc_mp.h */
void
rollback_expand_heap(struct rte_memseg **ms, int n_segs,
		struct malloc_elem *elem, void *map_addr, size_t map_len)
{
	if (elem != NULL) {
		malloc_elem_free_list_remove(elem);
		malloc_elem_hide_region(elem, map_addr, map_len);
	}

	eal_memalloc_free_seg_bulk(ms, n_segs);
}

/* this function is exposed in malloc_mp.h */
struct malloc_elem *
alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
		int socket, unsigned int flags, size_t align, size_t bound,
		bool contig, struct rte_memseg **ms, int n_segs)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct malloc_elem *elem = NULL;
	size_t alloc_sz;
	int allocd_pages, i;
	bool dirty = false;
	void *ret, *map_addr;

	alloc_sz = (size_t)pg_sz * n_segs;

	/* first, check if we're allowed to allocate this memory */
	if (eal_memalloc_mem_alloc_validate(socket,
			heap->total_size + alloc_sz) < 0) {
		RTE_LOG(DEBUG, EAL, "User has disallowed allocation\n");
		return NULL;
	}

	allocd_pages = eal_memalloc_alloc_seg_bulk(ms, n_segs, pg_sz,
			socket, true);

	/* make sure we've allocated our pages... */
	if (allocd_pages < 0)
		return NULL;

	map_addr = ms[0]->addr;
	msl = rte_mem_virt2memseg_list(map_addr);

	/* check if we wanted contiguous memory but didn't get it */
	if (contig && !eal_memalloc_is_contig(msl, map_addr, alloc_sz)) {
		RTE_LOG(DEBUG, EAL, "%s(): couldn't allocate physically contiguous space\n",
				__func__);
		goto fail;
	}

	/*
	 * Once we have all the memseg lists configured, if there is a dma mask
	 * set, check iova addresses are not out of range. Otherwise the device
	 * setting the dma mask could have problems with the mapped memory.
	 *
	 * There are two situations when this can happen:
	 * 1) memory initialization
	 * 2) dynamic memory allocation
	 *
	 * For 1), an error when checking the dma mask means the app cannot be
	 * executed. For 2), it means the new memory cannot be added.
	 */
	if (mcfg->dma_maskbits &&
	    rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
		/*
		 * Currently this can only happen if IOMMU is enabled
		 * and the address width supported by the IOMMU hw is
		 * not enough for using the memory mapped IOVAs.
		 *
		 * If IOVA is VA, advise trying '--iova-mode pa',
		 * which could solve some situations when IOVA VA is not
		 * really needed.
		 */
		RTE_LOG(ERR, EAL,
			"%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask\n",
			__func__);

		/*
		 * If IOVA is VA and it is possible to run with IOVA PA,
		 * because the user is root, give advice on how to solve
		 * the problem.
		 */
		if ((rte_eal_iova_mode() == RTE_IOVA_VA) &&
				rte_eal_using_phys_addrs())
			RTE_LOG(ERR, EAL,
				"%s(): Please try initializing EAL with --iova-mode=pa parameter\n",
				__func__);
		goto fail;
	}

	/* Element is dirty if it contains at least one dirty page. */
	for (i = 0; i < allocd_pages; i++)
		dirty |= ms[i]->flags & RTE_MEMSEG_FLAG_DIRTY;

	/* add newly minted memsegs to malloc heap */
	elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz, dirty);

	/* try once more, as now we have allocated new memory */
	ret = find_suitable_element(heap, elt_size, flags, align, bound,
			contig);

	if (ret == NULL)
		goto fail;

	return elem;

fail:
	rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
	return NULL;
}
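
/*
 * Primary process path for expanding a heap: allocate new pages, add them to
 * the heap, notify registered callbacks and synchronize the memory map with
 * secondary processes, rolling everything back if any step fails.
 */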
static int
try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz,
		size_t elt_size, int socket, unsigned int flags, size_t align,
		size_t bound, bool contig)
{
	struct malloc_elem *elem;
	struct rte_memseg **ms;
	void *map_addr;
	size_t alloc_sz;
	int n_segs;
	bool callback_triggered = false;

	alloc_sz = RTE_ALIGN_CEIL(align + elt_size +
			MALLOC_ELEM_TRAILER_LEN, pg_sz);
	n_segs = alloc_sz / pg_sz;

	/* we can't know in advance how many pages we'll need, so we malloc */
	ms = malloc(sizeof(*ms) * n_segs);
	if (ms == NULL)
		return -1;
	memset(ms, 0, sizeof(*ms) * n_segs);

	elem = alloc_pages_on_heap(heap, pg_sz, elt_size, socket, flags, align,
			bound, contig, ms, n_segs);

	if (elem == NULL)
		goto free_ms;

	map_addr = ms[0]->addr;

	/* notify user about changes in memory map */
	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC, map_addr, alloc_sz);

	/* notify other processes that this has happened */
	if (request_sync()) {
		/* we couldn't ensure all processes have mapped memory,
		 * so free it back and notify everyone that it's been
		 * freed back.
		 *
		 * technically, we could've avoided adding memory addresses to
		 * the map, but that would've led to inconsistent behavior
		 * between primary and secondary processes, as those get
		 * callbacks during sync. therefore, force primary process to
		 * do alloc-and-rollback syncs as well.
		 */
		callback_triggered = true;
		goto free_elem;
	}
	heap->total_size += alloc_sz;

	RTE_LOG(DEBUG, EAL, "Heap on socket %d was expanded by %zdMB\n",
		socket, alloc_sz >> 20ULL);

	free(ms);

	return 0;

free_elem:
	if (callback_triggered)
		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
				map_addr, alloc_sz);

	rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);

	request_sync();
free_ms:
	free(ms);

	return -1;
}
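
/*
 * Secondary process path for expanding a heap: forward the allocation request
 * to the primary process over the malloc multiprocess channel and report
 * whether it succeeded.
 */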
static int
try_expand_heap_secondary(struct malloc_heap *heap, uint64_t pg_sz,
		size_t elt_size, int socket, unsigned int flags, size_t align,
		size_t bound, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_mp_req req;
	int req_result;

	memset(&req, 0, sizeof(req));

	req.t = REQ_TYPE_ALLOC;
	req.alloc_req.align = align;
	req.alloc_req.bound = bound;
	req.alloc_req.contig = contig;
	req.alloc_req.flags = flags;
	req.alloc_req.elt_size = elt_size;
	req.alloc_req.page_sz = pg_sz;
	req.alloc_req.socket = socket;
	req.alloc_req.malloc_heap_idx = heap - mcfg->malloc_heaps;

	req_result = request_to_primary(&req);

	if (req_result != 0)
		return -1;

	if (req.result != REQ_RESULT_SUCCESS)
		return -1;

	return 0;
}

static int
try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
		int socket, unsigned int flags, size_t align, size_t bound,
		bool contig)
{
	int ret;

	rte_mcfg_mem_write_lock();

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
				flags, align, bound, contig);
	} else {
		ret = try_expand_heap_secondary(heap, pg_sz, elt_size, socket,
				flags, align, bound, contig);
	}

	rte_mcfg_mem_write_unlock();
	return ret;
}

static int
compare_pagesz(const void *a, const void *b)
{
	const struct rte_memseg_list * const*mpa = a;
	const struct rte_memseg_list * const*mpb = b;
	const struct rte_memseg_list *msla = *mpa;
	const struct rte_memseg_list *mslb = *mpb;
	uint64_t pg_sz_a = msla->page_sz;
	uint64_t pg_sz_b = mslb->page_sz;

	if (pg_sz_a < pg_sz_b)
		return -1;
	if (pg_sz_a > pg_sz_b)
		return 1;
	return 0;
}
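
/*
 * Try to grow the heap backing a given socket. Page sizes explicitly requested
 * via flags are tried first, smallest first; if the size flags were only a
 * hint, the remaining page sizes are tried as a fallback.
 */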
static int
alloc_more_mem_on_socket(struct malloc_heap *heap, size_t size, int socket,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *requested_msls[RTE_MAX_MEMSEG_LISTS];
	struct rte_memseg_list *other_msls[RTE_MAX_MEMSEG_LISTS];
	uint64_t requested_pg_sz[RTE_MAX_MEMSEG_LISTS];
	uint64_t other_pg_sz[RTE_MAX_MEMSEG_LISTS];
	uint64_t prev_pg_sz;
	int i, n_other_msls, n_other_pg_sz, n_requested_msls, n_requested_pg_sz;
	bool size_hint = (flags & RTE_MEMZONE_SIZE_HINT_ONLY) > 0;
	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
	void *ret;

	memset(requested_msls, 0, sizeof(requested_msls));
	memset(other_msls, 0, sizeof(other_msls));
	memset(requested_pg_sz, 0, sizeof(requested_pg_sz));
	memset(other_pg_sz, 0, sizeof(other_pg_sz));

	/*
	 * go through the memseg lists and take note of all the page sizes
	 * available, and whether any of them were specifically requested by
	 * the user.
	 */
	n_requested_msls = 0;
	n_other_msls = 0;
	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->socket_id != socket)
			continue;

		if (msl->base_va == NULL)
			continue;

		/* if pages of specific size were requested */
		if (size_flags != 0 && check_hugepage_sz(size_flags,
				msl->page_sz))
			requested_msls[n_requested_msls++] = msl;
		else if (size_flags == 0 || size_hint)
			other_msls[n_other_msls++] = msl;
	}

	/* sort the lists, smallest first */
	qsort(requested_msls, n_requested_msls, sizeof(requested_msls[0]),
			compare_pagesz);
	qsort(other_msls, n_other_msls, sizeof(other_msls[0]),
			compare_pagesz);

	/* now, extract page sizes we are supposed to try */
	prev_pg_sz = 0;
	n_requested_pg_sz = 0;
	for (i = 0; i < n_requested_msls; i++) {
		uint64_t pg_sz = requested_msls[i]->page_sz;

		if (prev_pg_sz != pg_sz) {
			requested_pg_sz[n_requested_pg_sz++] = pg_sz;
			prev_pg_sz = pg_sz;
		}
	}
	prev_pg_sz = 0;
	n_other_pg_sz = 0;
	for (i = 0; i < n_other_msls; i++) {
		uint64_t pg_sz = other_msls[i]->page_sz;

		if (prev_pg_sz != pg_sz) {
			other_pg_sz[n_other_pg_sz++] = pg_sz;
			prev_pg_sz = pg_sz;
		}
	}

	/* finally, try allocating memory of specified page sizes, starting from
	 * the smallest sizes
	 */
	for (i = 0; i < n_requested_pg_sz; i++) {
		uint64_t pg_sz = requested_pg_sz[i];

		/*
		 * do not pass the size hint here, as user expects other page
		 * sizes first, before resorting to best effort allocation.
		 */
		if (!try_expand_heap(heap, pg_sz, size, socket, size_flags,
				align, bound, contig))
			return 0;
	}
	if (n_other_pg_sz == 0)
		return -1;

	/* now, check if we can reserve anything with size hint */
	ret = find_suitable_element(heap, size, flags, align, bound, contig);
	if (ret != NULL)
		return 0;

	/*
	 * we still couldn't reserve memory, so try expanding heap with other
	 * page sizes, if there are any
	 */
	for (i = 0; i < n_other_pg_sz; i++) {
		uint64_t pg_sz = other_pg_sz[i];

		if (!try_expand_heap(heap, pg_sz, size, socket, flags,
				align, bound, contig))
			return 0;
	}
	return -1;
}

/* this will try lower page sizes first */
static void *
malloc_heap_alloc_on_heap_id(const char *type, size_t size,
		unsigned int heap_id, unsigned int flags, size_t align,
		size_t bound, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
	int socket_id;
	void *ret;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	rte_spinlock_lock(&(heap->lock));

	align = align == 0 ? 1 : align;

	/* for legacy mode, try once and with all flags */
	if (internal_conf->legacy_mem) {
		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
		goto alloc_unlock;
	}

	/*
	 * we do not pass the size hint here, because even if allocation fails,
	 * we may still be able to allocate memory from appropriate page sizes,
	 * we just need to request more memory first.
	 */

	socket_id = rte_socket_id_by_idx(heap_id);
	/*
	 * if socket ID is negative, we cannot find a socket ID for this heap -
	 * which means it's an external heap. those can have unexpected page
	 * sizes, so if the user asked to allocate from there - assume user
	 * knows what they're doing, and allow allocating from there with any
	 * page size flags.
	 */
	if (socket_id < 0)
		size_flags |= RTE_MEMZONE_SIZE_HINT_ONLY;

	ret = heap_alloc(heap, type, size, size_flags, align, bound, contig);
	if (ret != NULL)
		goto alloc_unlock;

	/* if socket ID is invalid, this is an external heap */
	if (socket_id < 0)
		goto alloc_unlock;

	if (!alloc_more_mem_on_socket(heap, size, socket_id, flags, align,
			bound, contig)) {
		ret = heap_alloc(heap, type, size, flags, align, bound, contig);

		/* this should have succeeded */
		if (ret == NULL)
			RTE_LOG(ERR, EAL, "Error allocating from heap\n");
	}
alloc_unlock:
	rte_spinlock_unlock(&(heap->lock));
	return ret;
}
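
/*
 * Pick the NUMA socket to allocate from. Threads bound to a socket simply use
 * rte_socket_id(); control threads (SOCKET_ID_ANY) fall back to the first
 * socket with memory reserved for it, or to the first socket overall.
 */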
static unsigned int
malloc_get_numa_socket(void)
{
	const struct internal_config *conf = eal_get_internal_configuration();
	unsigned int socket_id = rte_socket_id();
	unsigned int idx;

	if (socket_id != (unsigned int)SOCKET_ID_ANY)
		return socket_id;

	/* for control threads, return first socket where memory is available */
	for (idx = 0; idx < rte_socket_count(); idx++) {
		socket_id = rte_socket_id_by_idx(idx);
		if (conf->socket_mem[socket_id] != 0)
			return socket_id;
	}

	return rte_socket_id_by_idx(0);
}
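
/*
 * Internal allocation entry point: validate size and alignment, resolve the
 * socket to a heap and allocate from it, then fall back to the other internal
 * heaps if the original request was for SOCKET_ID_ANY.
 */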
void *
malloc_heap_alloc(const char *type, size_t size, int socket_arg,
		unsigned int flags, size_t align, size_t bound, bool contig)
{
	int socket, heap_id, i;
	void *ret;

	/* return NULL if size is 0 or alignment is not power-of-2 */
	if (size == 0 || (align && !rte_is_power_of_2(align)))
		return NULL;

	if (!rte_eal_has_hugepages() && socket_arg < RTE_MAX_NUMA_NODES)
		socket_arg = SOCKET_ID_ANY;

	if (socket_arg == SOCKET_ID_ANY)
		socket = malloc_get_numa_socket();
	else
		socket = socket_arg;

	/* turn socket ID into heap ID */
	heap_id = malloc_socket_to_heap_id(socket);
	/* if heap id is negative, socket ID was invalid */
	if (heap_id < 0)
		return NULL;

	ret = malloc_heap_alloc_on_heap_id(type, size, heap_id, flags, align,
			bound, contig);
	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
		return ret;

	/* try other heaps. we are only iterating through native DPDK sockets,
	 * so external heaps won't be included.
	 */
	for (i = 0; i < (int) rte_socket_count(); i++) {
		if (i == heap_id)
			continue;
		ret = malloc_heap_alloc_on_heap_id(type, size, i, flags, align,
				bound, contig);
		if (ret != NULL)
			return ret;
	}
	return NULL;
}

static void *
heap_alloc_biggest_on_heap_id(const char *type, unsigned int heap_id,
		unsigned int flags, size_t align, bool contig)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
	void *ret;

	rte_spinlock_lock(&(heap->lock));

	align = align == 0 ? 1 : align;

	ret = heap_alloc_biggest(heap, type, flags, align, contig);

	rte_spinlock_unlock(&(heap->lock));

	return ret;
}

void *
malloc_heap_alloc_biggest(const char *type, int socket_arg, unsigned int flags,
		size_t align, bool contig)
{
	int socket, i, cur_socket, heap_id;
	void *ret;

	/* return NULL if align is not power-of-2 */
	if ((align && !rte_is_power_of_2(align)))
		return NULL;

	if (!rte_eal_has_hugepages())
		socket_arg = SOCKET_ID_ANY;

	if (socket_arg == SOCKET_ID_ANY)
		socket = malloc_get_numa_socket();
	else
		socket = socket_arg;

	/* turn socket ID into heap ID */
	heap_id = malloc_socket_to_heap_id(socket);
	/* if heap id is negative, socket ID was invalid */
	if (heap_id < 0)
		return NULL;

	ret = heap_alloc_biggest_on_heap_id(type, heap_id, flags, align,
			contig);
	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
		return ret;

	/* try other heaps */
	for (i = 0; i < (int) rte_socket_count(); i++) {
		cur_socket = rte_socket_id_by_idx(i);
		if (cur_socket == socket)
			continue;
		ret = heap_alloc_biggest_on_heap_id(type, i, flags, align,
				contig);
		if (ret != NULL)
			return ret;
	}
	return NULL;
}

/* this function is exposed in malloc_mp.h */
int
malloc_heap_free_pages(void *aligned_start, size_t aligned_len)
{
	int n_segs, seg_idx, max_seg_idx;
	struct rte_memseg_list *msl;
	size_t page_sz;

	msl = rte_mem_virt2memseg_list(aligned_start);
	if (msl == NULL)
		return -1;

	page_sz = (size_t)msl->page_sz;
	n_segs = aligned_len / page_sz;
	seg_idx = RTE_PTR_DIFF(aligned_start, msl->base_va) / page_sz;
	max_seg_idx = seg_idx + n_segs;

	for (; seg_idx < max_seg_idx; seg_idx++) {
		struct rte_memseg *ms;

		ms = rte_fbarray_get(&msl->memseg_arr, seg_idx);
		eal_memalloc_free_seg(ms);
	}
	return 0;
}
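
/*
 * Free an element back to its heap and, where possible, hand whole pages back
 * to the system. Pages are only released in non-legacy mode, for internal
 * memory, and only if doing so does not strand free elements too small to
 * hold new allocations.
 */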
int
malloc_heap_free(struct malloc_elem *elem)
{
	struct malloc_heap *heap;
	void *start, *aligned_start, *end, *aligned_end;
	size_t len, aligned_len, page_sz;
	struct rte_memseg_list *msl;
	unsigned int i, n_segs, before_space, after_space;
	int ret;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
		return -1;

	asan_clear_redzone(elem);

	/* elem may be merged with previous element, so keep heap address */
	heap = elem->heap;
	msl = elem->msl;
	page_sz = (size_t)msl->page_sz;

	rte_spinlock_lock(&(heap->lock));

	void *asan_ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN + elem->pad);
	size_t asan_data_len = elem->size - MALLOC_ELEM_OVERHEAD - elem->pad;

	/* mark element as free */
	elem->state = ELEM_FREE;

	elem = malloc_elem_free(elem);

	/* anything after this is a bonus */
	ret = 0;

	/* ...of which we can't avail if we are in legacy mode, or if this is an
	 * externally allocated segment.
	 */
	if (internal_conf->legacy_mem || (msl->external > 0))
		goto free_unlock;

	/* check if we can free any memory back to the system */
	if (elem->size < page_sz)
		goto free_unlock;

	/* if user requested to match allocations, the sizes must match - if not,
	 * we will defer freeing these hugepages until the entire original allocation
	 * can be freed
	 */
	if (internal_conf->match_allocations && elem->size != elem->orig_size)
		goto free_unlock;

	/* probably, but let's make sure, as we may not be using up a full page */
	start = elem;
	len = elem->size;
	aligned_start = RTE_PTR_ALIGN_CEIL(start, page_sz);
	end = RTE_PTR_ADD(elem, len);
	aligned_end = RTE_PTR_ALIGN_FLOOR(end, page_sz);

	aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);

	/* can't free anything */
	if (aligned_len < page_sz)
		goto free_unlock;

	/* we can free something. however, some of these pages may be marked as
	 * unfreeable, so also check that as well
	 */
	n_segs = aligned_len / page_sz;
	for (i = 0; i < n_segs; i++) {
		const struct rte_memseg *tmp =
				rte_mem_virt2memseg(aligned_start, msl);

		if (tmp->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
			/* this is an unfreeable segment, so move start */
			aligned_start = RTE_PTR_ADD(tmp->addr, tmp->len);
		}
	}

	/* recalculate length and number of segments */
	aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);
	n_segs = aligned_len / page_sz;

	/* check if we can still free some pages */
	if (n_segs == 0)
		goto free_unlock;

	/* We're not done yet. We also have to check if by freeing space we will
	 * be leaving free elements that are too small to store new elements.
	 * Check if we have enough space in the beginning and at the end, or if
	 * start/end are exactly page aligned.
	 */
	before_space = RTE_PTR_DIFF(aligned_start, elem);
	after_space = RTE_PTR_DIFF(end, aligned_end);
	if (before_space != 0 &&
			before_space < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
		/* There is not enough space before start, but we may be able to
		 * move the start forward by one page.
		 */
		if (n_segs == 1)
			goto free_unlock;

		/* move start */
		aligned_start = RTE_PTR_ADD(aligned_start, page_sz);
		aligned_len -= page_sz;
		n_segs--;
	}
	if (after_space != 0 && after_space <
			MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
		/* There is not enough space after end, but we may be able to
		 * move the end backwards by one page.
		 */
		if (n_segs == 1)
			goto free_unlock;

		/* move end */
		aligned_end = RTE_PTR_SUB(aligned_end, page_sz);
		aligned_len -= page_sz;
		n_segs--;
	}

	/* now we can finally free us some pages */

	rte_mcfg_mem_write_lock();

	/*
	 * we allow secondary processes to clear the heap of this allocated
	 * memory because it is safe to do so, as even if notifications about
	 * unmapped pages don't make it to other processes, the heap is shared
	 * across all processes, and will become empty of this memory anyway,
	 * and nothing can allocate it back unless the primary process is able
	 * to deliver the allocation message to every single running process.
	 */

	malloc_elem_free_list_remove(elem);

	malloc_elem_hide_region(elem, (void *) aligned_start, aligned_len);

	heap->total_size -= aligned_len;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* notify user about changes in memory map */
		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
				aligned_start, aligned_len);

		/* don't care if any of this fails */
		malloc_heap_free_pages(aligned_start, aligned_len);

		request_sync();
	} else {
		struct malloc_mp_req req;

		memset(&req, 0, sizeof(req));

		req.t = REQ_TYPE_FREE;
		req.free_req.addr = aligned_start;
		req.free_req.len = aligned_len;

		/*
		 * we request primary to deallocate pages, but we don't do it
		 * in this thread. instead, we notify primary that we would like
		 * to deallocate pages, and this process will receive another
		 * request (in parallel) that will do it for us on another
		 * thread.
		 *
		 * we also don't really care if this succeeds - the data is
		 * already removed from the heap, so it is, for all intents and
		 * purposes, hidden from the rest of DPDK even if some other
		 * process (including this one) may have these pages mapped.
		 *
		 * notifications about deallocated memory happen during sync.
		 */
		request_to_primary(&req);
	}

	RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
		msl->socket_id, aligned_len >> 20ULL);

	rte_mcfg_mem_write_unlock();
free_unlock:
	asan_set_freezone(asan_ptr, asan_data_len);

	rte_spinlock_unlock(&(heap->lock));
	return ret;
}
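
/*
 * Resize a busy element in place, growing into adjacent free space when
 * possible.
 */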
int
malloc_heap_resize(struct malloc_elem *elem, size_t size)
{
	int ret;

	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
		return -1;

	rte_spinlock_lock(&(elem->heap->lock));

	ret = malloc_elem_resize(elem, size);

	rte_spinlock_unlock(&(elem->heap->lock));

	return ret;
}

/*
 * Function to retrieve data for a given heap
 */
int
malloc_heap_get_stats(struct malloc_heap *heap,
		struct rte_malloc_socket_stats *socket_stats)
{
	size_t idx;
	struct malloc_elem *elem;

	rte_spinlock_lock(&heap->lock);

	/* Initialise variables for heap */
	socket_stats->free_count = 0;
	socket_stats->heap_freesz_bytes = 0;
	socket_stats->greatest_free_size = 0;

	/* Iterate through free list */
	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
		for (elem = LIST_FIRST(&heap->free_head[idx]);
			!!elem; elem = LIST_NEXT(elem, free_list))
		{
			socket_stats->free_count++;
			socket_stats->heap_freesz_bytes += elem->size;
			if (elem->size > socket_stats->greatest_free_size)
				socket_stats->greatest_free_size = elem->size;
		}
	}
	/* Get stats on overall heap and allocated memory on this heap */
	socket_stats->heap_totalsz_bytes = heap->total_size;
	socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
			socket_stats->heap_freesz_bytes);
	socket_stats->alloc_count = heap->alloc_count;

	rte_spinlock_unlock(&heap->lock);
	return 0;
}

/*
 * Function to dump the contents of a given heap
 */
void
malloc_heap_dump(struct malloc_heap *heap, FILE *f)
{
	struct malloc_elem *elem;

	rte_spinlock_lock(&heap->lock);

	fprintf(f, "Heap size: 0x%zx\n", heap->total_size);
	fprintf(f, "Heap alloc count: %u\n", heap->alloc_count);

	elem = heap->first;
	while (elem) {
		malloc_elem_dump(elem, f);
		elem = elem->next;
	}

	rte_spinlock_unlock(&heap->lock);
}
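
/*
 * Remove an element backing an external segment from the heap: notify
 * subscribers of the removal, hide the region from the allocator and adjust
 * the heap's accounting.
 */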
"Heap alloc count: %u\n", heap->alloc_count); 1113 1114 elem = heap->first; 1115 while (elem) { 1116 malloc_elem_dump(elem, f); 1117 elem = elem->next; 1118 } 1119 1120 rte_spinlock_unlock(&heap->lock); 1121 } 1122 1123 static int 1124 destroy_elem(struct malloc_elem *elem, size_t len) 1125 { 1126 struct malloc_heap *heap = elem->heap; 1127 1128 /* notify all subscribers that a memory area is going to be removed */ 1129 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE, elem, len); 1130 1131 /* this element can be removed */ 1132 malloc_elem_free_list_remove(elem); 1133 malloc_elem_hide_region(elem, elem, len); 1134 1135 heap->total_size -= len; 1136 1137 memset(elem, 0, sizeof(*elem)); 1138 1139 return 0; 1140 } 1141 1142 struct rte_memseg_list * 1143 malloc_heap_create_external_seg(void *va_addr, rte_iova_t iova_addrs[], 1144 unsigned int n_pages, size_t page_sz, const char *seg_name, 1145 unsigned int socket_id) 1146 { 1147 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; 1148 char fbarray_name[RTE_FBARRAY_NAME_LEN]; 1149 struct rte_memseg_list *msl = NULL; 1150 struct rte_fbarray *arr; 1151 size_t seg_len = n_pages * page_sz; 1152 unsigned int i; 1153 1154 /* first, find a free memseg list */ 1155 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) { 1156 struct rte_memseg_list *tmp = &mcfg->memsegs[i]; 1157 if (tmp->base_va == NULL) { 1158 msl = tmp; 1159 break; 1160 } 1161 } 1162 if (msl == NULL) { 1163 RTE_LOG(ERR, EAL, "Couldn't find empty memseg list\n"); 1164 rte_errno = ENOSPC; 1165 return NULL; 1166 } 1167 1168 snprintf(fbarray_name, sizeof(fbarray_name), "%s_%p", 1169 seg_name, va_addr); 1170 1171 /* create the backing fbarray */ 1172 if (rte_fbarray_init(&msl->memseg_arr, fbarray_name, n_pages, 1173 sizeof(struct rte_memseg)) < 0) { 1174 RTE_LOG(ERR, EAL, "Couldn't create fbarray backing the memseg list\n"); 1175 return NULL; 1176 } 1177 arr = &msl->memseg_arr; 1178 1179 /* fbarray created, fill it up */ 1180 for (i = 0; i < n_pages; i++) { 1181 struct rte_memseg *ms; 1182 1183 rte_fbarray_set_used(arr, i); 1184 ms = rte_fbarray_get(arr, i); 1185 ms->addr = RTE_PTR_ADD(va_addr, i * page_sz); 1186 ms->iova = iova_addrs == NULL ? 
struct rte_memseg_list *
malloc_heap_create_external_seg(void *va_addr, rte_iova_t iova_addrs[],
		unsigned int n_pages, size_t page_sz, const char *seg_name,
		unsigned int socket_id)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	char fbarray_name[RTE_FBARRAY_NAME_LEN];
	struct rte_memseg_list *msl = NULL;
	struct rte_fbarray *arr;
	size_t seg_len = n_pages * page_sz;
	unsigned int i;

	/* first, find a free memseg list */
	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *tmp = &mcfg->memsegs[i];
		if (tmp->base_va == NULL) {
			msl = tmp;
			break;
		}
	}
	if (msl == NULL) {
		RTE_LOG(ERR, EAL, "Couldn't find empty memseg list\n");
		rte_errno = ENOSPC;
		return NULL;
	}

	snprintf(fbarray_name, sizeof(fbarray_name), "%s_%p",
			seg_name, va_addr);

	/* create the backing fbarray */
	if (rte_fbarray_init(&msl->memseg_arr, fbarray_name, n_pages,
			sizeof(struct rte_memseg)) < 0) {
		RTE_LOG(ERR, EAL, "Couldn't create fbarray backing the memseg list\n");
		return NULL;
	}
	arr = &msl->memseg_arr;

	/* fbarray created, fill it up */
	for (i = 0; i < n_pages; i++) {
		struct rte_memseg *ms;

		rte_fbarray_set_used(arr, i);
		ms = rte_fbarray_get(arr, i);
		ms->addr = RTE_PTR_ADD(va_addr, i * page_sz);
		ms->iova = iova_addrs == NULL ? RTE_BAD_IOVA : iova_addrs[i];
		ms->hugepage_sz = page_sz;
		ms->len = page_sz;
		ms->nchannel = rte_memory_get_nchannel();
		ms->nrank = rte_memory_get_nrank();
		ms->socket_id = socket_id;
	}

	/* set up the memseg list */
	msl->base_va = va_addr;
	msl->page_sz = page_sz;
	msl->socket_id = socket_id;
	msl->len = seg_len;
	msl->version = 0;
	msl->external = 1;

	return msl;
}

struct extseg_walk_arg {
	void *va_addr;
	size_t len;
	struct rte_memseg_list *msl;
};

static int
extseg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct extseg_walk_arg *wa = arg;

	if (msl->base_va == wa->va_addr && msl->len == wa->len) {
		unsigned int found_idx;

		/* msl is const */
		found_idx = msl - mcfg->memsegs;
		wa->msl = &mcfg->memsegs[found_idx];
		return 1;
	}
	return 0;
}

struct rte_memseg_list *
malloc_heap_find_external_seg(void *va_addr, size_t len)
{
	struct extseg_walk_arg wa;
	int res;

	wa.va_addr = va_addr;
	wa.len = len;

	res = rte_memseg_list_walk_thread_unsafe(extseg_walk, &wa);

	if (res != 1) {
		/* 0 means nothing was found, -1 shouldn't happen */
		if (res == 0)
			rte_errno = ENOENT;
		return NULL;
	}
	return wa.msl;
}

int
malloc_heap_destroy_external_seg(struct rte_memseg_list *msl)
{
	/* destroy the fbarray backing this memory */
	if (rte_fbarray_destroy(&msl->memseg_arr) < 0)
		return -1;

	/* reset the memseg list */
	memset(msl, 0, sizeof(*msl));

	return 0;
}

int
malloc_heap_add_external_memory(struct malloc_heap *heap,
		struct rte_memseg_list *msl)
{
	/* erase contents of new memory */
	memset(msl->base_va, 0, msl->len);

	/* now, add newly minted memory to the malloc heap */
	malloc_heap_add_memory(heap, msl, msl->base_va, msl->len, false);

	heap->total_size += msl->len;

	/* all done! */
	RTE_LOG(DEBUG, EAL, "Added segment for heap %s starting at %p\n",
			heap->name, msl->base_va);

	/* notify all subscribers that a new memory area has been added */
	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
			msl->base_va, msl->len);

	return 0;
}

int
malloc_heap_remove_external_memory(struct malloc_heap *heap, void *va_addr,
		size_t len)
{
	struct malloc_elem *elem = heap->first;

	/* find element with specified va address */
	while (elem != NULL && elem != va_addr) {
		elem = elem->next;
		/* stop if we've blown past our VA */
		if (elem > (struct malloc_elem *)va_addr) {
			rte_errno = ENOENT;
			return -1;
		}
	}
	/* check if element was found */
	if (elem == NULL || elem->msl->len != len) {
		rte_errno = ENOENT;
		return -1;
	}
	/* if element's size is not equal to segment len, segment is busy */
	if (elem->state == ELEM_BUSY || elem->size != len) {
		rte_errno = EBUSY;
		return -1;
	}
	return destroy_elem(elem, len);
}
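
/*
 * Initialize an empty heap and assign it the next free pseudo-socket ID.
 * The caller is expected to hold the memory hotplug write lock.
 */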
int
malloc_heap_create(struct malloc_heap *heap, const char *heap_name)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	uint32_t next_socket_id = mcfg->next_socket_id;

	/* prevent overflow. did you really create 2 billion heaps??? */
	if (next_socket_id > INT32_MAX) {
		RTE_LOG(ERR, EAL, "Cannot assign new socket IDs\n");
		rte_errno = ENOSPC;
		return -1;
	}

	/* initialize empty heap */
	heap->alloc_count = 0;
	heap->first = NULL;
	heap->last = NULL;
	LIST_INIT(heap->free_head);
	rte_spinlock_init(&heap->lock);
	heap->total_size = 0;
	heap->socket_id = next_socket_id;

	/* we hold a global mem hotplug writelock, so it's safe to increment */
	mcfg->next_socket_id++;

	/* set up name */
	strlcpy(heap->name, heap_name, RTE_HEAP_NAME_MAX_LEN);
	return 0;
}

int
malloc_heap_destroy(struct malloc_heap *heap)
{
	if (heap->alloc_count != 0) {
		RTE_LOG(ERR, EAL, "Heap is still in use\n");
		rte_errno = EBUSY;
		return -1;
	}
	if (heap->first != NULL || heap->last != NULL) {
		RTE_LOG(ERR, EAL, "Heap still contains memory segments\n");
		rte_errno = EBUSY;
		return -1;
	}
	if (heap->total_size != 0)
		RTE_LOG(ERR, EAL, "Total size not zero, heap is likely corrupt\n");

	/* after this, the lock will be dropped */
	memset(heap, 0, sizeof(*heap));

	return 0;
}

int
rte_eal_malloc_heap_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	unsigned int i;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (internal_conf->match_allocations)
		RTE_LOG(DEBUG, EAL, "Hugepages will be freed exactly as allocated.\n");

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* assign min socket ID to external heaps */
		mcfg->next_socket_id = EXTERNAL_HEAP_MIN_SOCKET_ID;

		/* assign names to default DPDK heaps */
		for (i = 0; i < rte_socket_count(); i++) {
			struct malloc_heap *heap = &mcfg->malloc_heaps[i];
			char heap_name[RTE_HEAP_NAME_MAX_LEN];
			int socket_id = rte_socket_id_by_idx(i);

			snprintf(heap_name, sizeof(heap_name),
					"socket_%i", socket_id);
			strlcpy(heap->name, heap_name, RTE_HEAP_NAME_MAX_LEN);
			heap->socket_id = socket_id;
		}
	}

	if (register_mp_requests()) {
		RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
		rte_mcfg_mem_read_unlock();
		return -1;
	}

	/* unlock mem hotplug here. it's safe for primary as no requests can
	 * even come before primary itself is fully initialized, and secondaries
	 * do not need to initialize the heap.
	 */
	rte_mcfg_mem_read_unlock();

	/* secondary process does not need to initialize anything */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* add all IOVA-contiguous areas to the heap */
	return rte_memseg_contig_walk(malloc_add_seg, NULL);
}