/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2020 Dmitry Kozlyuk
 */

#include <inttypes.h>
#include <io.h>

#include <rte_eal_paging.h>
#include <rte_errno.h>

#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "eal_memcfg.h"
#include "eal_options.h"
#include "eal_private.h"
#include "eal_windows.h"

#include <rte_virt2phys.h>

/* MinGW-w64 headers lack VirtualAlloc2() in some distributions.
 * Note: definitions are copied verbatim from Microsoft documentation
 * and don't follow DPDK code style.
 */
#ifndef MEM_EXTENDED_PARAMETER_TYPE_BITS

#define MEM_EXTENDED_PARAMETER_TYPE_BITS 4

/* https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-mem_extended_parameter_type */
typedef enum MEM_EXTENDED_PARAMETER_TYPE {
	MemExtendedParameterInvalidType,
	MemExtendedParameterAddressRequirements,
	MemExtendedParameterNumaNode,
	MemExtendedParameterPartitionHandle,
	MemExtendedParameterUserPhysicalHandle,
	MemExtendedParameterAttributeFlags,
	MemExtendedParameterMax
} *PMEM_EXTENDED_PARAMETER_TYPE;

/* https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-mem_extended_parameter */
typedef struct MEM_EXTENDED_PARAMETER {
	struct {
		DWORD64 Type : MEM_EXTENDED_PARAMETER_TYPE_BITS;
		DWORD64 Reserved : 64 - MEM_EXTENDED_PARAMETER_TYPE_BITS;
	} DUMMYSTRUCTNAME;
	union {
		DWORD64 ULong64;
		PVOID Pointer;
		SIZE_T Size;
		HANDLE Handle;
		DWORD ULong;
	} DUMMYUNIONNAME;
} MEM_EXTENDED_PARAMETER, *PMEM_EXTENDED_PARAMETER;

#endif /* defined(MEM_EXTENDED_PARAMETER_TYPE_BITS) */

/* https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc2 */
typedef PVOID (*VirtualAlloc2_type)(
	HANDLE Process,
	PVOID BaseAddress,
	SIZE_T Size,
	ULONG AllocationType,
	ULONG PageProtection,
	MEM_EXTENDED_PARAMETER *ExtendedParameters,
	ULONG ParameterCount
);

/* MinGW-w64 distributions, even those that declare VirtualAlloc2(),
 * lack it in import libraries, which results in a failure at link time.
 * Link it dynamically in that case.
 */
static VirtualAlloc2_type VirtualAlloc2_ptr;

#ifdef RTE_TOOLCHAIN_GCC

#define MEM_COALESCE_PLACEHOLDERS 0x00000001
#define MEM_PRESERVE_PLACEHOLDER  0x00000002
#define MEM_REPLACE_PLACEHOLDER   0x00004000
#define MEM_RESERVE_PLACEHOLDER   0x00040000

int
eal_mem_win32api_init(void)
{
	/* Contrary to the docs, VirtualAlloc2() is not in kernel32.dll,
	 * see https://github.com/MicrosoftDocs/feedback/issues/1129.
	 */
	static const char library_name[] = "kernelbase.dll";
	static const char function[] = "VirtualAlloc2";

	HMODULE library = NULL;
	int ret = 0;

	/* Already done. */
	if (VirtualAlloc2_ptr != NULL)
		return 0;

	library = LoadLibraryA(library_name);
	if (library == NULL) {
		RTE_LOG_WIN32_ERR("LoadLibraryA(\"%s\")", library_name);
		return -1;
	}

	VirtualAlloc2_ptr = (VirtualAlloc2_type)(
		(void *)GetProcAddress(library, function));
	if (VirtualAlloc2_ptr == NULL) {
		RTE_LOG_WIN32_ERR("GetProcAddress(\"%s\", \"%s\")",
			library_name, function);

		/* Contrary to the docs, Server 2016 is not supported. */
		RTE_LOG(ERR, EAL, "Windows 10 or Windows Server 2019 "
			"is required for memory management\n");
		ret = -1;
	}

	FreeLibrary(library);

	return ret;
}

#else

/* Stub in case VirtualAlloc2() is provided by the toolchain. */
int
eal_mem_win32api_init(void)
{
	VirtualAlloc2_ptr = VirtualAlloc2;
	return 0;
}

#endif /* defined(RTE_TOOLCHAIN_GCC) */
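/* Illustrative sketch (hypothetical caller, editorial addition): the
 * allocator below keeps unused VA ranges as placeholders
 * (MEM_RESERVE_PLACEHOLDER) so they can be split and backed with hugepages
 * on demand. Assuming eal_mem_win32api_init() succeeded, a typical
 * lifecycle using the functions defined further down is:
 *
 *	void *va = eal_mem_reserve(NULL, len, 0);	- placeholder created
 *	eal_mem_commit(va, len, SOCKET_ID_ANY);		- hugepages committed
 *	eal_mem_decommit(va, len);			- placeholder restored
 *	eal_mem_free(va, len);				- VA returned to the OS
 */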
static HANDLE virt2phys_device = INVALID_HANDLE_VALUE;

int
eal_mem_virt2iova_init(void)
{
	HDEVINFO list = INVALID_HANDLE_VALUE;
	SP_DEVICE_INTERFACE_DATA ifdata;
	SP_DEVICE_INTERFACE_DETAIL_DATA *detail = NULL;
	DWORD detail_size;
	int ret = -1;

	list = SetupDiGetClassDevs(
		&GUID_DEVINTERFACE_VIRT2PHYS, NULL, NULL,
		DIGCF_DEVICEINTERFACE | DIGCF_PRESENT);
	if (list == INVALID_HANDLE_VALUE) {
		RTE_LOG_WIN32_ERR("SetupDiGetClassDevs()");
		goto exit;
	}

	ifdata.cbSize = sizeof(ifdata);
	if (!SetupDiEnumDeviceInterfaces(
		list, NULL, &GUID_DEVINTERFACE_VIRT2PHYS, 0, &ifdata)) {
		RTE_LOG_WIN32_ERR("SetupDiEnumDeviceInterfaces()");
		goto exit;
	}

	/* Probe for the detail size; ERROR_INSUFFICIENT_BUFFER is the
	 * expected outcome of this call.
	 */
	if (!SetupDiGetDeviceInterfaceDetail(
		list, &ifdata, NULL, 0, &detail_size, NULL)) {
		if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
			RTE_LOG_WIN32_ERR(
				"SetupDiGetDeviceInterfaceDetail(probe)");
			goto exit;
		}
	}

	detail = malloc(detail_size);
	if (detail == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate virt2phys "
			"device interface detail data\n");
		goto exit;
	}

	detail->cbSize = sizeof(*detail);
	if (!SetupDiGetDeviceInterfaceDetail(
		list, &ifdata, detail, detail_size, NULL, NULL)) {
		RTE_LOG_WIN32_ERR("SetupDiGetDeviceInterfaceDetail(read)");
		goto exit;
	}

	RTE_LOG(DEBUG, EAL, "Found virt2phys device: %s\n", detail->DevicePath);

	virt2phys_device = CreateFile(
		detail->DevicePath, 0, 0, NULL, OPEN_EXISTING, 0, NULL);
	if (virt2phys_device == INVALID_HANDLE_VALUE) {
		RTE_LOG_WIN32_ERR("CreateFile()");
		goto exit;
	}

	/* Indicate success. */
	ret = 0;

exit:
	free(detail);
	if (list != INVALID_HANDLE_VALUE)
		SetupDiDestroyDeviceInfoList(list);

	return ret;
}

void
eal_mem_virt2iova_cleanup(void)
{
	if (virt2phys_device != INVALID_HANDLE_VALUE)
		CloseHandle(virt2phys_device);
}

phys_addr_t
rte_mem_virt2phy(const void *virt)
{
	LARGE_INTEGER phys;
	DWORD bytes_returned;

	if (virt2phys_device == INVALID_HANDLE_VALUE)
		return RTE_BAD_PHYS_ADDR;

	if (!DeviceIoControl(
			virt2phys_device, IOCTL_VIRT2PHYS_TRANSLATE,
			&virt, sizeof(virt), &phys, sizeof(phys),
			&bytes_returned, NULL)) {
		RTE_LOG_WIN32_ERR("DeviceIoControl(IOCTL_VIRT2PHYS_TRANSLATE)");
		return RTE_BAD_PHYS_ADDR;
	}

	return phys.QuadPart;
}

rte_iova_t
rte_mem_virt2iova(const void *virt)
{
	phys_addr_t phys;

	if (rte_eal_iova_mode() == RTE_IOVA_VA)
		return (rte_iova_t)virt;

	phys = rte_mem_virt2phy(virt);
	if (phys == RTE_BAD_PHYS_ADDR)
		return RTE_BAD_IOVA;
	return (rte_iova_t)phys;
}

/* Physical addresses are always used under Windows if they can be obtained. */
int
rte_eal_using_phys_addrs(void)
{
	return virt2phys_device != INVALID_HANDLE_VALUE;
}
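/* Usage sketch (hypothetical caller, editorial addition): resolving the
 * IOVA of a buffer before handing it to a device. In RTE_IOVA_PA mode this
 * requires the virt2phys device opened by eal_mem_virt2iova_init() above:
 *
 *	rte_iova_t iova = rte_mem_virt2iova(buf);
 *	if (iova == RTE_BAD_IOVA)
 *		(bail out: no translation is available)
 */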
/* Approximate error mapping from VirtualAlloc2() to POSIX mmap(3). */
static void
set_errno_from_win32_alloc_error(DWORD code)
{
	switch (code) {
	case ERROR_SUCCESS:
		rte_errno = 0;
		break;

	case ERROR_INVALID_ADDRESS:
		/* A valid requested address is not available. */
	case ERROR_COMMITMENT_LIMIT:
		/* May occur when committing regular memory. */
	case ERROR_NO_SYSTEM_RESOURCES:
		/* Occurs when the system runs out of hugepages. */
		rte_errno = ENOMEM;
		break;

	case ERROR_INVALID_PARAMETER:
	default:
		rte_errno = EINVAL;
		break;
	}
}

void *
eal_mem_reserve(void *requested_addr, size_t size, int flags)
{
	HANDLE process;
	void *virt;

	/* Windows requires hugepages to be committed. */
	if (flags & EAL_RESERVE_HUGEPAGES) {
		rte_errno = ENOTSUP;
		return NULL;
	}

	process = GetCurrentProcess();

	virt = VirtualAlloc2_ptr(process, requested_addr, size,
		MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS,
		NULL, 0);
	if (virt == NULL) {
		/* Save the error code before logging can overwrite it. */
		DWORD err = GetLastError();

		RTE_LOG_WIN32_ERR("VirtualAlloc2()");
		set_errno_from_win32_alloc_error(err);
		return NULL;
	}

	if ((flags & EAL_RESERVE_FORCE_ADDRESS) && (virt != requested_addr)) {
		if (!VirtualFreeEx(process, virt, 0, MEM_RELEASE))
			RTE_LOG_WIN32_ERR("VirtualFreeEx()");
		rte_errno = ENOMEM;
		return NULL;
	}

	return virt;
}
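/* Usage sketch (hypothetical caller, editorial addition): reserving a
 * region at an exact address, rather than treating requested_addr as a
 * hint:
 *
 *	void *va = eal_mem_reserve(hint, len, EAL_RESERVE_FORCE_ADDRESS);
 *	(va == hint on success; NULL with rte_errno set otherwise)
 */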
void *
eal_mem_alloc_socket(size_t size, int socket_id)
{
	DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
	void *addr;

	addr = VirtualAllocExNuma(GetCurrentProcess(), NULL, size, flags,
		PAGE_READWRITE, eal_socket_numa_node(socket_id));
	if (addr == NULL)
		rte_errno = ENOMEM;
	return addr;
}

void *
eal_mem_commit(void *requested_addr, size_t size, int socket_id)
{
	HANDLE process;
	MEM_EXTENDED_PARAMETER param;
	DWORD param_count = 0;
	DWORD flags;
	void *addr;

	process = GetCurrentProcess();

	if (requested_addr != NULL) {
		MEMORY_BASIC_INFORMATION info;

		if (VirtualQueryEx(process, requested_addr, &info,
				sizeof(info)) != sizeof(info)) {
			RTE_LOG_WIN32_ERR("VirtualQueryEx(%p)",
				requested_addr);
			return NULL;
		}

		/* Split the reserved region if only a part is committed. */
		flags = MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER;
		if ((info.RegionSize > size) && !VirtualFreeEx(
				process, requested_addr, size, flags)) {
			RTE_LOG_WIN32_ERR(
				"VirtualFreeEx(%p, %zu, preserve placeholder)",
				requested_addr, size);
			return NULL;
		}

		/* Temporarily release the region to be committed.
		 *
		 * There is an inherent race for this memory range
		 * if another thread allocates memory via the OS API.
		 * However, VirtualAlloc2(MEM_REPLACE_PLACEHOLDER)
		 * doesn't work with MEM_LARGE_PAGES on Windows Server.
		 */
		if (!VirtualFreeEx(process, requested_addr, 0, MEM_RELEASE)) {
			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)",
				requested_addr);
			return NULL;
		}
	}

	if (socket_id != SOCKET_ID_ANY) {
		param_count = 1;
		memset(&param, 0, sizeof(param));
		param.Type = MemExtendedParameterNumaNode;
		param.ULong = eal_socket_numa_node(socket_id);
	}

	flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
	addr = VirtualAlloc2_ptr(process, requested_addr, size,
		flags, PAGE_READWRITE, &param, param_count);
	if (addr == NULL) {
		/* Logging may overwrite GetLastError() result. */
		DWORD err = GetLastError();

		RTE_LOG_WIN32_ERR("VirtualAlloc2(%p, %zu, commit large pages)",
			requested_addr, size);
		set_errno_from_win32_alloc_error(err);
		return NULL;
	}

	if ((requested_addr != NULL) && (addr != requested_addr)) {
		/* We lost the race for the requested_addr. */
		if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE))
			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, release)", addr);

		rte_errno = EADDRNOTAVAIL;
		return NULL;
	}

	return addr;
}

int
eal_mem_decommit(void *addr, size_t size)
{
	HANDLE process;
	void *stub;
	DWORD flags;

	process = GetCurrentProcess();

	/* Hugepages cannot be decommitted on Windows,
	 * so free them and replace the block with a placeholder.
	 * There is a race for VA in this block until the VirtualAlloc2 call.
	 */
	if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
		RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)", addr);
		return -1;
	}

	flags = MEM_RESERVE | MEM_RESERVE_PLACEHOLDER;
	stub = VirtualAlloc2_ptr(
		process, addr, size, flags, PAGE_NOACCESS, NULL, 0);
	if (stub == NULL) {
		/* We lost the race for the VA: the range now belongs
		 * to another thread, so there is nothing to free here.
		 */
		rte_errno = EADDRNOTAVAIL;
		return -1;
	}

	/* No need to join reserved regions adjacent to the freed one:
	 * eal_mem_commit() will just pick up the page-size placeholder
	 * created here.
	 */
	return 0;
}

/**
 * Free a reserved memory region in full or in part.
 *
 * @param addr
 *  Starting address of the area to free.
 * @param size
 *  Number of bytes to free. Must be a multiple of page size.
 * @param reserved
 *  Fail if the region is not in reserved state.
 * @return
 *  * 0 on successful deallocation;
 *  * 1 if the region must be in reserved state but is not;
 *  * (-1) on system API failures.
 */
static int
mem_free(void *addr, size_t size, bool reserved)
{
	MEMORY_BASIC_INFORMATION info;
	HANDLE process;

	process = GetCurrentProcess();

	if (VirtualQueryEx(
			process, addr, &info, sizeof(info)) != sizeof(info)) {
		RTE_LOG_WIN32_ERR("VirtualQueryEx(%p)", addr);
		return -1;
	}

	if (reserved && (info.State != MEM_RESERVE))
		return 1;

	/* Free the complete region. */
	if ((addr == info.AllocationBase) && (size == info.RegionSize)) {
		if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)",
				addr);
		}
		return 0;
	}

	/* Split the part to be freed from the remaining reservation. */
	if (!VirtualFreeEx(process, addr, size,
			MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
		RTE_LOG_WIN32_ERR(
			"VirtualFreeEx(%p, %zu, preserve placeholder)",
			addr, size);
		return -1;
	}

	/* Actually free the split-off part of the reservation. */
	if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
		RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)", addr);
		return -1;
	}

	return 0;
}

void
eal_mem_free(void *virt, size_t size)
{
	mem_free(virt, size, false);
}
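/* Usage sketch (hypothetical caller, editorial addition; RTE_PTR_ADD is
 * from <rte_common.h>): trimming the tail of a reservation while keeping
 * the head reserved. mem_free() above splits the placeholder and releases
 * only the requested part:
 *
 *	eal_mem_free(RTE_PTR_ADD(base, keep_sz), total_sz - keep_sz);
 */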
int
eal_mem_set_dump(void *virt, size_t size, bool dump)
{
	RTE_SET_USED(virt);
	RTE_SET_USED(size);
	RTE_SET_USED(dump);

	/* Windows does not dump reserved memory by default.
	 *
	 * There is <werapi.h> to include or exclude regions from the dump,
	 * but this is not currently required by EAL.
	 */

	rte_errno = ENOTSUP;
	return -1;
}

void *
rte_mem_map(void *requested_addr, size_t size, int prot, int flags,
	int fd, uint64_t offset)
{
	HANDLE file_handle = INVALID_HANDLE_VALUE;
	HANDLE mapping_handle = INVALID_HANDLE_VALUE;
	DWORD sys_prot = 0;
	DWORD sys_access = 0;
	DWORD size_high = (DWORD)(size >> 32);
	DWORD size_low = (DWORD)size;
	DWORD offset_high = (DWORD)(offset >> 32);
	DWORD offset_low = (DWORD)offset;
	LPVOID virt = NULL;

	if (prot & RTE_PROT_EXECUTE) {
		if (prot & RTE_PROT_READ) {
			sys_prot = PAGE_EXECUTE_READ;
			sys_access = FILE_MAP_READ | FILE_MAP_EXECUTE;
		}
		if (prot & RTE_PROT_WRITE) {
			sys_prot = PAGE_EXECUTE_READWRITE;
			sys_access = FILE_MAP_WRITE | FILE_MAP_EXECUTE;
		}
	} else {
		if (prot & RTE_PROT_READ) {
			sys_prot = PAGE_READONLY;
			sys_access = FILE_MAP_READ;
		}
		if (prot & RTE_PROT_WRITE) {
			sys_prot = PAGE_READWRITE;
			sys_access = FILE_MAP_WRITE;
		}
	}

	if (flags & RTE_MAP_PRIVATE)
		sys_access |= FILE_MAP_COPY;

	if ((flags & RTE_MAP_ANONYMOUS) == 0)
		file_handle = (HANDLE)_get_osfhandle(fd);

	/* CreateFileMapping() returns NULL on failure,
	 * not INVALID_HANDLE_VALUE.
	 */
	mapping_handle = CreateFileMapping(
		file_handle, NULL, sys_prot, size_high, size_low, NULL);
	if (mapping_handle == NULL) {
		RTE_LOG_WIN32_ERR("CreateFileMapping()");
		return NULL;
	}

	/* There is a race for the requested_addr between mem_free()
	 * and MapViewOfFileEx(). MapViewOfFile3() can replace a reserved
	 * region with a mapping in a single operation, but it does not
	 * support private mappings.
	 */
	if (requested_addr != NULL) {
		int ret = mem_free(requested_addr, size, true);
		if (ret) {
			if (ret > 0) {
				RTE_LOG(ERR, EAL, "Cannot map memory "
					"to a region not reserved\n");
				rte_errno = EADDRNOTAVAIL;
			}
			/* Don't leak the mapping handle on failure. */
			CloseHandle(mapping_handle);
			return NULL;
		}
	}

	virt = MapViewOfFileEx(mapping_handle, sys_access,
		offset_high, offset_low, size, requested_addr);
	if (!virt) {
		RTE_LOG_WIN32_ERR("MapViewOfFileEx()");
		CloseHandle(mapping_handle);
		return NULL;
	}

	if ((flags & RTE_MAP_FORCE_ADDRESS) && (virt != requested_addr)) {
		if (!UnmapViewOfFile(virt))
			RTE_LOG_WIN32_ERR("UnmapViewOfFile()");
		virt = NULL;
	}

	if (!CloseHandle(mapping_handle))
		RTE_LOG_WIN32_ERR("CloseHandle()");

	return virt;
}

int
rte_mem_unmap(void *virt, size_t size)
{
	RTE_SET_USED(size);

	if (!UnmapViewOfFile(virt)) {
		RTE_LOG_WIN32_ERR("UnmapViewOfFile()");
		rte_errno = EINVAL;
		return -1;
	}
	return 0;
}
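/* Usage sketch (hypothetical caller, editorial addition): mapping a file
 * read-only through the POSIX-like wrapper above, then unmapping it:
 *
 *	void *va = rte_mem_map(NULL, sz, RTE_PROT_READ, RTE_MAP_SHARED,
 *			fd, 0);
 *	if (va != NULL)
 *		rte_mem_unmap(va, sz);
 */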
uint64_t
eal_get_baseaddr(void)
{
	/* The Windows strategy for memory allocation is undocumented.
	 * Returning 0 here effectively disables address guessing
	 * unless the user provides an address hint.
	 */
	return 0;
}

size_t
rte_mem_page_size(void)
{
	static SYSTEM_INFO info;

	if (info.dwPageSize == 0)
		GetSystemInfo(&info);

	return info.dwPageSize;
}

int
rte_mem_lock(const void *virt, size_t size)
{
	/* VirtualLock() takes `void *`; work around the compiler warning. */
	void *addr = (void *)((uintptr_t)virt);

	if (!VirtualLock(addr, size)) {
		RTE_LOG_WIN32_ERR("VirtualLock(%p, %#zx)", virt, size);
		return -1;
	}

	return 0;
}

int
rte_eal_memseg_init(void)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		EAL_LOG_NOT_IMPLEMENTED();
		return -1;
	}

	return eal_dynmem_memseg_lists_init();
}

static int
eal_nohuge_init(void)
{
	struct rte_mem_config *mcfg;
	struct rte_memseg_list *msl;
	int n_segs;
	uint64_t mem_sz, page_sz;
	void *addr;

	mcfg = rte_eal_get_configuration()->mem_config;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* No-huge mode is legacy mode. */
	internal_conf->legacy_mem = 1;

	msl = &mcfg->memsegs[0];

	mem_sz = internal_conf->memory;
	page_sz = RTE_PGSIZE_4K;
	n_segs = mem_sz / page_sz;

	if (eal_memseg_list_init_named(
			msl, "nohugemem", page_sz, n_segs, 0, true))
		return -1;

	addr = VirtualAlloc(
		NULL, mem_sz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
	if (addr == NULL) {
		RTE_LOG_WIN32_ERR("VirtualAlloc(size=%#zx)", mem_sz);
		RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
		return -1;
	}

	msl->base_va = addr;
	msl->len = mem_sz;

	eal_memseg_list_populate(msl, addr, n_segs);

	if (mcfg->dma_maskbits &&
		rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
		RTE_LOG(ERR, EAL,
			"%s(): couldn't allocate memory due to IOVA "
			"exceeding limits of current DMA mask.\n", __func__);
		return -1;
	}

	return 0;
}

int
rte_eal_hugepage_init(void)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	return internal_conf->no_hugetlbfs ?
		eal_nohuge_init() : eal_dynmem_hugepage_init();
}

int
rte_eal_hugepage_attach(void)
{
	EAL_LOG_NOT_IMPLEMENTED();
	return -1;
}
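/* Note (editorial addition): eal_nohuge_init() above backs the --no-huge
 * EAL option, in which case the amount of memory comes from -m/--memory.
 * A hypothetical invocation:
 *
 *	app.exe --no-huge -m 64
 *
 * makes rte_eal_hugepage_init() take the eal_nohuge_init() branch instead
 * of eal_dynmem_hugepage_init().
 */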