/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2020 Dmitry Kozlyuk
 */

#include <inttypes.h>
#include <io.h>

#include <rte_eal_paging.h>
#include <rte_errno.h>

#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "eal_memcfg.h"
#include "eal_options.h"
#include "eal_private.h"
#include "eal_windows.h"

#include <rte_virt2phys.h>

/* MinGW-w64 headers lack VirtualAlloc2() in some distributions.
 * Note: definitions are copied verbatim from Microsoft documentation
 * and don't follow DPDK code style.
 */
#ifndef MEM_EXTENDED_PARAMETER_TYPE_BITS

#define MEM_EXTENDED_PARAMETER_TYPE_BITS 4

/* https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-mem_extended_parameter_type */
typedef enum MEM_EXTENDED_PARAMETER_TYPE {
	MemExtendedParameterInvalidType,
	MemExtendedParameterAddressRequirements,
	MemExtendedParameterNumaNode,
	MemExtendedParameterPartitionHandle,
	MemExtendedParameterUserPhysicalHandle,
	MemExtendedParameterAttributeFlags,
	MemExtendedParameterMax
} *PMEM_EXTENDED_PARAMETER_TYPE;

/* https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-mem_extended_parameter */
typedef struct MEM_EXTENDED_PARAMETER {
	struct {
		DWORD64 Type : MEM_EXTENDED_PARAMETER_TYPE_BITS;
		DWORD64 Reserved : 64 - MEM_EXTENDED_PARAMETER_TYPE_BITS;
	} DUMMYSTRUCTNAME;
	union {
		DWORD64 ULong64;
		PVOID Pointer;
		SIZE_T Size;
		HANDLE Handle;
		DWORD ULong;
	} DUMMYUNIONNAME;
} MEM_EXTENDED_PARAMETER, *PMEM_EXTENDED_PARAMETER;

#endif /* !defined(MEM_EXTENDED_PARAMETER_TYPE_BITS) */

/* https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc2 */
typedef PVOID (*VirtualAlloc2_type)(
	HANDLE Process,
	PVOID BaseAddress,
	SIZE_T Size,
	ULONG AllocationType,
	ULONG PageProtection,
	MEM_EXTENDED_PARAMETER *ExtendedParameters,
	ULONG ParameterCount
);

/* MinGW-w64 distributions, even those that declare VirtualAlloc2(),
 * lack it in import libraries, which results in a failure at link time.
 * Link it dynamically in such a case.
 */
static VirtualAlloc2_type VirtualAlloc2_ptr;

#ifdef RTE_TOOLCHAIN_GCC

#define MEM_COALESCE_PLACEHOLDERS 0x00000001
#define MEM_PRESERVE_PLACEHOLDER 0x00000002
#define MEM_REPLACE_PLACEHOLDER 0x00004000
#define MEM_RESERVE_PLACEHOLDER 0x00040000

int
eal_mem_win32api_init(void)
{
	/* Contrary to the docs, VirtualAlloc2() is not in kernel32.dll,
	 * see https://github.com/MicrosoftDocs/feedback/issues/1129.
	 */
	static const char library_name[] = "kernelbase.dll";
	static const char function[] = "VirtualAlloc2";

	HMODULE library = NULL;
	int ret = 0;

	/* Already done. */
	if (VirtualAlloc2_ptr != NULL)
		return 0;

	library = LoadLibraryA(library_name);
	if (library == NULL) {
		RTE_LOG_WIN32_ERR("LoadLibraryA(\"%s\")", library_name);
		return -1;
	}

	VirtualAlloc2_ptr = (VirtualAlloc2_type)(
		(void *)GetProcAddress(library, function));
	if (VirtualAlloc2_ptr == NULL) {
		RTE_LOG_WIN32_ERR("GetProcAddress(\"%s\", \"%s\")",
			library_name, function);

		/* Contrary to the docs, Server 2016 is not supported. */
		RTE_LOG(ERR, EAL, "Windows 10 or Windows Server 2019 "
			"is required for memory management\n");
		ret = -1;
	}

	FreeLibrary(library);

	return ret;
}

#else

/* Stub in case VirtualAlloc2() is provided by the toolchain. */
int
eal_mem_win32api_init(void)
{
	VirtualAlloc2_ptr = VirtualAlloc2;
	return 0;
}

#endif /* defined(RTE_TOOLCHAIN_GCC) */
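
/* Illustrative sketch, not part of EAL: code reaching VirtualAlloc2_ptr
 * must run eal_mem_win32api_init() first. EAL does this during
 * rte_eal_init(); the snippet only shows the required ordering:
 *
 *	if (eal_mem_win32api_init() < 0)
 *		rte_exit(EXIT_FAILURE, "Cannot load VirtualAlloc2()\n");
 *	virt = eal_mem_reserve(NULL, len, 0);
 */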

static HANDLE virt2phys_device = INVALID_HANDLE_VALUE;

int
eal_mem_virt2iova_init(void)
{
	HDEVINFO list = INVALID_HANDLE_VALUE;
	SP_DEVICE_INTERFACE_DATA ifdata;
	SP_DEVICE_INTERFACE_DETAIL_DATA *detail = NULL;
	DWORD detail_size;
	int ret = -1;

	list = SetupDiGetClassDevs(
		&GUID_DEVINTERFACE_VIRT2PHYS, NULL, NULL,
		DIGCF_DEVICEINTERFACE | DIGCF_PRESENT);
	if (list == INVALID_HANDLE_VALUE) {
		RTE_LOG_WIN32_ERR("SetupDiGetClassDevs()");
		goto exit;
	}

	ifdata.cbSize = sizeof(ifdata);
	if (!SetupDiEnumDeviceInterfaces(
		list, NULL, &GUID_DEVINTERFACE_VIRT2PHYS, 0, &ifdata)) {
		RTE_LOG_WIN32_ERR("SetupDiEnumDeviceInterfaces()");
		goto exit;
	}

	/* Probe the required buffer size first; this call is expected
	 * to fail with ERROR_INSUFFICIENT_BUFFER.
	 */
	if (!SetupDiGetDeviceInterfaceDetail(
		list, &ifdata, NULL, 0, &detail_size, NULL)) {
		if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
			RTE_LOG_WIN32_ERR(
				"SetupDiGetDeviceInterfaceDetail(probe)");
			goto exit;
		}
	}

	detail = malloc(detail_size);
	if (detail == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate virt2phys "
			"device interface detail data\n");
		goto exit;
	}

	detail->cbSize = sizeof(*detail);
	if (!SetupDiGetDeviceInterfaceDetail(
		list, &ifdata, detail, detail_size, NULL, NULL)) {
		RTE_LOG_WIN32_ERR("SetupDiGetDeviceInterfaceDetail(read)");
		goto exit;
	}

	RTE_LOG(DEBUG, EAL, "Found virt2phys device: %s\n", detail->DevicePath);

	virt2phys_device = CreateFile(
		detail->DevicePath, 0, 0, NULL, OPEN_EXISTING, 0, NULL);
	if (virt2phys_device == INVALID_HANDLE_VALUE) {
		RTE_LOG_WIN32_ERR("CreateFile()");
		goto exit;
	}

	/* Indicate success. */
	ret = 0;

exit:
	if (detail != NULL)
		free(detail);
	if (list != INVALID_HANDLE_VALUE)
		SetupDiDestroyDeviceInfoList(list);

	return ret;
}

phys_addr_t
rte_mem_virt2phy(const void *virt)
{
	LARGE_INTEGER phys;
	DWORD bytes_returned;

	if (virt2phys_device == INVALID_HANDLE_VALUE)
		return RTE_BAD_PHYS_ADDR;

	if (!DeviceIoControl(
			virt2phys_device, IOCTL_VIRT2PHYS_TRANSLATE,
			&virt, sizeof(virt), &phys, sizeof(phys),
			&bytes_returned, NULL)) {
		RTE_LOG_WIN32_ERR("DeviceIoControl(IOCTL_VIRT2PHYS_TRANSLATE)");
		return RTE_BAD_PHYS_ADDR;
	}

	return phys.QuadPart;
}

/* Windows currently only supports IOVA as PA. */
rte_iova_t
rte_mem_virt2iova(const void *virt)
{
	phys_addr_t phys;

	if (virt2phys_device == INVALID_HANDLE_VALUE)
		return RTE_BAD_IOVA;

	phys = rte_mem_virt2phy(virt);
	if (phys == RTE_BAD_PHYS_ADDR)
		return RTE_BAD_IOVA;

	return (rte_iova_t)phys;
}

/* Physical addresses are always used under Windows if they can be obtained. */
int
rte_eal_using_phys_addrs(void)
{
	return virt2phys_device != INVALID_HANDLE_VALUE;
}
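
/* Illustrative sketch, not part of EAL: a driver-side check that DMA
 * addresses are obtainable for a buffer (`buf` is hypothetical):
 *
 *	rte_iova_t iova = rte_mem_virt2iova(buf);
 *	if (iova == RTE_BAD_IOVA) {
 *		RTE_LOG(ERR, EAL, "virt2phys translation unavailable\n");
 *		return -1;
 *	}
 *
 * RTE_BAD_IOVA is returned both when the virt2phys driver is absent
 * and when translation fails for the given address.
 */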

/* Approximate error mapping from VirtualAlloc2() to POSIX mmap(3). */
static void
set_errno_from_win32_alloc_error(DWORD code)
{
	switch (code) {
	case ERROR_SUCCESS:
		rte_errno = 0;
		break;

	case ERROR_INVALID_ADDRESS:
		/* A valid requested address is not available. */
	case ERROR_COMMITMENT_LIMIT:
		/* May occur when committing regular memory. */
	case ERROR_NO_SYSTEM_RESOURCES:
		/* Occurs when the system runs out of hugepages. */
		rte_errno = ENOMEM;
		break;

	case ERROR_INVALID_PARAMETER:
	default:
		rte_errno = EINVAL;
		break;
	}
}

void *
eal_mem_reserve(void *requested_addr, size_t size, int flags)
{
	HANDLE process;
	void *virt;

	/* Windows requires hugepages to be committed. */
	if (flags & EAL_RESERVE_HUGEPAGES) {
		rte_errno = ENOTSUP;
		return NULL;
	}

	process = GetCurrentProcess();

	virt = VirtualAlloc2_ptr(process, requested_addr, size,
		MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS,
		NULL, 0);
	if (virt == NULL) {
		DWORD err = GetLastError();
		RTE_LOG_WIN32_ERR("VirtualAlloc2()");
		set_errno_from_win32_alloc_error(err);
		return NULL;
	}

	if ((flags & EAL_RESERVE_FORCE_ADDRESS) && (virt != requested_addr)) {
		if (!VirtualFreeEx(process, virt, 0, MEM_RELEASE))
			RTE_LOG_WIN32_ERR("VirtualFreeEx()");
		rte_errno = ENOMEM;
		return NULL;
	}

	return virt;
}

void *
eal_mem_alloc_socket(size_t size, int socket_id)
{
	DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
	void *addr;

	addr = VirtualAllocExNuma(GetCurrentProcess(), NULL, size, flags,
		PAGE_READWRITE, eal_socket_numa_node(socket_id));
	if (addr == NULL)
		rte_errno = ENOMEM;
	return addr;
}

void *
eal_mem_commit(void *requested_addr, size_t size, int socket_id)
{
	HANDLE process;
	MEM_EXTENDED_PARAMETER param;
	DWORD param_count = 0;
	DWORD flags;
	void *addr;

	process = GetCurrentProcess();

	if (requested_addr != NULL) {
		MEMORY_BASIC_INFORMATION info;

		if (VirtualQueryEx(process, requested_addr, &info,
				sizeof(info)) != sizeof(info)) {
			RTE_LOG_WIN32_ERR("VirtualQueryEx(%p)",
				requested_addr);
			return NULL;
		}

		/* Split reserved region if only a part is committed. */
		flags = MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER;
		if ((info.RegionSize > size) && !VirtualFreeEx(
				process, requested_addr, size, flags)) {
			RTE_LOG_WIN32_ERR(
				"VirtualFreeEx(%p, %zu, preserve placeholder)",
				requested_addr, size);
			return NULL;
		}

		/* Temporarily release the region to be committed.
		 *
		 * There is an inherent race for this memory range
		 * if another thread allocates memory via OS API.
		 * However, VirtualAlloc2(MEM_REPLACE_PLACEHOLDER)
		 * doesn't work with MEM_LARGE_PAGES on Windows Server.
		 */
		if (!VirtualFreeEx(process, requested_addr, 0, MEM_RELEASE)) {
			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)",
				requested_addr);
			return NULL;
		}
	}

	if (socket_id != SOCKET_ID_ANY) {
		param_count = 1;
		memset(&param, 0, sizeof(param));
		param.Type = MemExtendedParameterNumaNode;
		param.ULong = eal_socket_numa_node(socket_id);
	}

	flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
	addr = VirtualAlloc2_ptr(process, requested_addr, size,
		flags, PAGE_READWRITE, &param, param_count);
	if (addr == NULL) {
		/* Logging may overwrite GetLastError() result. */
		DWORD err = GetLastError();
		RTE_LOG_WIN32_ERR("VirtualAlloc2(%p, %zu, commit large pages)",
			requested_addr, size);
		set_errno_from_win32_alloc_error(err);
		return NULL;
	}

	if ((requested_addr != NULL) && (addr != requested_addr)) {
		/* We lost the race for the requested_addr. */
		if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE))
			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, release)", addr);

		rte_errno = EADDRNOTAVAIL;
		return NULL;
	}

	return addr;
}
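
/* Illustrative sketch, not part of EAL: the intended lifecycle of the
 * helpers above, assuming `len` is a multiple of the hugepage size:
 *
 *	void *va = eal_mem_reserve(NULL, len, 0);
 *	if (va != NULL && eal_mem_commit(va, len, SOCKET_ID_ANY) == NULL) {
 *		if (rte_errno == EADDRNOTAVAIL) {
 *			// The placeholder was lost to another thread
 *			// in the release/re-allocate window above.
 *		}
 *	}
 */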

int
eal_mem_decommit(void *addr, size_t size)
{
	HANDLE process;
	void *stub;
	DWORD flags;

	process = GetCurrentProcess();

	/* Hugepages cannot be decommitted on Windows,
	 * so free them and replace the block with a placeholder.
	 * There is a race for the VA in this block
	 * until the VirtualAlloc2() call.
	 */
	if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
		RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)", addr);
		return -1;
	}

	flags = MEM_RESERVE | MEM_RESERVE_PLACEHOLDER;
	stub = VirtualAlloc2_ptr(
		process, addr, size, flags, PAGE_NOACCESS, NULL, 0);
	if (stub == NULL) {
		/* We lost the race for the VA: the region is already
		 * released and another thread may own it now,
		 * so there is nothing left to free.
		 */
		RTE_LOG_WIN32_ERR("VirtualAlloc2(%p, %zu, placeholder)",
			addr, size);
		rte_errno = EADDRNOTAVAIL;
		return -1;
	}

	/* No need to join reserved regions adjacent to the freed one:
	 * eal_mem_commit() will just pick up the page-size placeholder
	 * created here.
	 */
	return 0;
}

/**
 * Free a reserved memory region in full or in part.
 *
 * @param addr
 *  Starting address of the area to free.
 * @param size
 *  Number of bytes to free. Must be a multiple of page size.
 * @param reserved
 *  Fail if the region is not in reserved state.
 * @return
 *  * 0 on successful deallocation;
 *  * 1 if region must be in reserved state but it is not;
 *  * (-1) on system API failures.
 */
static int
mem_free(void *addr, size_t size, bool reserved)
{
	MEMORY_BASIC_INFORMATION info;
	HANDLE process;

	process = GetCurrentProcess();

	if (VirtualQueryEx(
			process, addr, &info, sizeof(info)) != sizeof(info)) {
		RTE_LOG_WIN32_ERR("VirtualQueryEx(%p)", addr);
		return -1;
	}

	if (reserved && (info.State != MEM_RESERVE))
		return 1;

	/* Free the complete region. */
	if ((addr == info.AllocationBase) && (size == info.RegionSize)) {
		if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)",
				addr);
		}
		return 0;
	}

	/* Split the part to be freed from the remaining reservation. */
	if (!VirtualFreeEx(process, addr, size,
			MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
		RTE_LOG_WIN32_ERR(
			"VirtualFreeEx(%p, %zu, preserve placeholder)",
			addr, size);
		return -1;
	}

	/* Actually free the part split off above. */
	if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
		RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)", addr);
		return -1;
	}

	return 0;
}

void
eal_mem_free(void *virt, size_t size)
{
	mem_free(virt, size, false);
}

int
eal_mem_set_dump(void *virt, size_t size, bool dump)
{
	RTE_SET_USED(virt);
	RTE_SET_USED(size);
	RTE_SET_USED(dump);

	/* Windows does not dump reserved memory by default.
	 *
	 * There is <werapi.h> to include or exclude regions from the dump,
	 * but this is not currently required by EAL.
	 */

	rte_errno = ENOTSUP;
	return -1;
}
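
/* Illustrative sketch, not part of EAL: if dump control were ever
 * needed, the <werapi.h> hook would look roughly like this, assuming
 * the size fits in a DWORD:
 *
 *	#include <werapi.h>
 *
 *	HRESULT hr = WerRegisterMemoryBlock(virt, (DWORD)size);
 *	if (FAILED(hr))
 *		RTE_LOG(ERR, EAL, "WerRegisterMemoryBlock(): %lx\n", hr);
 */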

void *
rte_mem_map(void *requested_addr, size_t size, int prot, int flags,
	int fd, uint64_t offset)
{
	HANDLE file_handle = INVALID_HANDLE_VALUE;
	HANDLE mapping_handle = INVALID_HANDLE_VALUE;
	DWORD sys_prot = 0;
	DWORD sys_access = 0;
	DWORD size_high = (DWORD)(size >> 32);
	DWORD size_low = (DWORD)size;
	DWORD offset_high = (DWORD)(offset >> 32);
	DWORD offset_low = (DWORD)offset;
	LPVOID virt = NULL;

	if (prot & RTE_PROT_EXECUTE) {
		if (prot & RTE_PROT_READ) {
			sys_prot = PAGE_EXECUTE_READ;
			sys_access = FILE_MAP_READ | FILE_MAP_EXECUTE;
		}
		if (prot & RTE_PROT_WRITE) {
			sys_prot = PAGE_EXECUTE_READWRITE;
			sys_access = FILE_MAP_WRITE | FILE_MAP_EXECUTE;
		}
	} else {
		if (prot & RTE_PROT_READ) {
			sys_prot = PAGE_READONLY;
			sys_access = FILE_MAP_READ;
		}
		if (prot & RTE_PROT_WRITE) {
			sys_prot = PAGE_READWRITE;
			sys_access = FILE_MAP_WRITE;
		}
	}

	if (flags & RTE_MAP_PRIVATE)
		sys_access |= FILE_MAP_COPY;

	if ((flags & RTE_MAP_ANONYMOUS) == 0)
		file_handle = (HANDLE)_get_osfhandle(fd);

	/* CreateFileMapping() returns NULL on failure,
	 * not INVALID_HANDLE_VALUE.
	 */
	mapping_handle = CreateFileMapping(
		file_handle, NULL, sys_prot, size_high, size_low, NULL);
	if (mapping_handle == NULL) {
		RTE_LOG_WIN32_ERR("CreateFileMapping()");
		return NULL;
	}

	/* There is a race for the requested_addr between mem_free()
	 * and MapViewOfFileEx(). MapViewOfFile3() can replace a reserved
	 * region with a mapping in a single operation, but it does not
	 * support private mappings.
	 */
	if (requested_addr != NULL) {
		int ret = mem_free(requested_addr, size, true);
		if (ret) {
			if (ret > 0) {
				RTE_LOG(ERR, EAL, "Cannot map memory "
					"to a region not reserved\n");
				rte_errno = EADDRNOTAVAIL;
			}
			/* Avoid leaking the mapping handle on early exit. */
			CloseHandle(mapping_handle);
			return NULL;
		}
	}

	virt = MapViewOfFileEx(mapping_handle, sys_access,
		offset_high, offset_low, size, requested_addr);
	if (!virt) {
		RTE_LOG_WIN32_ERR("MapViewOfFileEx()");
		CloseHandle(mapping_handle);
		return NULL;
	}

	if ((flags & RTE_MAP_FORCE_ADDRESS) && (virt != requested_addr)) {
		if (!UnmapViewOfFile(virt))
			RTE_LOG_WIN32_ERR("UnmapViewOfFile()");
		virt = NULL;
		rte_errno = EADDRNOTAVAIL;
	}

	if (!CloseHandle(mapping_handle))
		RTE_LOG_WIN32_ERR("CloseHandle()");

	return virt;
}

int
rte_mem_unmap(void *virt, size_t size)
{
	RTE_SET_USED(size);

	if (!UnmapViewOfFile(virt)) {
		RTE_LOG_WIN32_ERR("UnmapViewOfFile()");
		rte_errno = EINVAL;
		return -1;
	}
	return 0;
}

uint64_t
eal_get_baseaddr(void)
{
	/* Windows strategy for memory allocation is undocumented.
	 * Returning 0 here effectively disables address guessing
	 * unless the user provides an address hint.
	 */
	return 0;
}

size_t
rte_mem_page_size(void)
{
	static SYSTEM_INFO info;

	if (info.dwPageSize == 0)
		GetSystemInfo(&info);

	return info.dwPageSize;
}

int
rte_mem_lock(const void *virt, size_t size)
{
	/* VirtualLock() takes `void *`, work around compiler warning. */
	void *addr = (void *)((uintptr_t)virt);

	if (!VirtualLock(addr, size)) {
		RTE_LOG_WIN32_ERR("VirtualLock(%p %#zx)", virt, size);
		return -1;
	}

	return 0;
}
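
/* Illustrative sketch, not part of EAL: VirtualLock() operates on whole
 * pages, so callers typically round the size up first (`obj` and
 * `obj_len` are hypothetical):
 *
 *	size_t len = RTE_ALIGN_CEIL(obj_len, rte_mem_page_size());
 *	if (rte_mem_lock(obj, len) < 0) {
 *		// Failure details are in the log; rte_mem_lock()
 *		// does not set rte_errno here.
 *	}
 */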

int
rte_eal_memseg_init(void)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		EAL_LOG_NOT_IMPLEMENTED();
		return -1;
	}

	return eal_dynmem_memseg_lists_init();
}

static int
eal_nohuge_init(void)
{
	struct rte_mem_config *mcfg;
	struct rte_memseg_list *msl;
	int n_segs;
	uint64_t mem_sz, page_sz;
	void *addr;

	mcfg = rte_eal_get_configuration()->mem_config;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* nohuge mode is legacy mode */
	internal_conf->legacy_mem = 1;

	msl = &mcfg->memsegs[0];

	mem_sz = internal_conf->memory;
	page_sz = RTE_PGSIZE_4K;
	n_segs = mem_sz / page_sz;

	if (eal_memseg_list_init_named(
			msl, "nohugemem", page_sz, n_segs, 0, true)) {
		return -1;
	}

	addr = VirtualAlloc(
		NULL, mem_sz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
	if (addr == NULL) {
		/* mem_sz is uint64_t, use the matching format. */
		RTE_LOG_WIN32_ERR("VirtualAlloc(size=%#" PRIx64 ")", mem_sz);
		RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
		return -1;
	}

	msl->base_va = addr;
	msl->len = mem_sz;

	eal_memseg_list_populate(msl, addr, n_segs);

	if (mcfg->dma_maskbits &&
		rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
		RTE_LOG(ERR, EAL,
			"%s(): couldn't allocate memory due to IOVA "
			"exceeding limits of current DMA mask.\n", __func__);
		return -1;
	}

	return 0;
}

int
rte_eal_hugepage_init(void)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	return internal_conf->no_hugetlbfs ?
		eal_nohuge_init() : eal_dynmem_hugepage_init();
}

int
rte_eal_hugepage_attach(void)
{
	EAL_LOG_NOT_IMPLEMENTED();
	return -1;
}
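
/* Illustrative note, not part of EAL: the eal_nohuge_init() path above
 * is taken when EAL starts without hugepage support, e.g.:
 *
 *	app.exe --no-huge -m 64
 *
 * which requests 64 MB of regular 4K-page memory in legacy mode.
 */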