/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2020 Dmitry Kozlyuk
 */

#include <inttypes.h>
#include <io.h>

#include <rte_eal_paging.h>
#include <rte_errno.h>

#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "eal_memcfg.h"
#include "eal_options.h"
#include "eal_private.h"
#include "eal_windows.h"

#include <rte_virt2phys.h>

/* MinGW-w64 headers lack VirtualAlloc2() in some distributions.
 * Note: definitions are copied verbatim from Microsoft documentation
 * and don't follow DPDK code style.
 */
#ifndef MEM_EXTENDED_PARAMETER_TYPE_BITS

#define MEM_EXTENDED_PARAMETER_TYPE_BITS 4

/* https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-mem_extended_parameter_type */
typedef enum MEM_EXTENDED_PARAMETER_TYPE {
	MemExtendedParameterInvalidType,
	MemExtendedParameterAddressRequirements,
	MemExtendedParameterNumaNode,
	MemExtendedParameterPartitionHandle,
	MemExtendedParameterUserPhysicalHandle,
	MemExtendedParameterAttributeFlags,
	MemExtendedParameterMax
} *PMEM_EXTENDED_PARAMETER_TYPE;

/* https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-mem_extended_parameter */
typedef struct MEM_EXTENDED_PARAMETER {
	struct {
		DWORD64 Type : MEM_EXTENDED_PARAMETER_TYPE_BITS;
		DWORD64 Reserved : 64 - MEM_EXTENDED_PARAMETER_TYPE_BITS;
	} DUMMYSTRUCTNAME;
	union {
		DWORD64 ULong64;
		PVOID   Pointer;
		SIZE_T  Size;
		HANDLE  Handle;
		DWORD   ULong;
	} DUMMYUNIONNAME;
} MEM_EXTENDED_PARAMETER, *PMEM_EXTENDED_PARAMETER;

#endif /* !defined(MEM_EXTENDED_PARAMETER_TYPE_BITS) */

/* https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc2 */
typedef PVOID (*VirtualAlloc2_type)(
	HANDLE                 Process,
	PVOID                  BaseAddress,
	SIZE_T                 Size,
	ULONG                  AllocationType,
	ULONG                  PageProtection,
	MEM_EXTENDED_PARAMETER *ExtendedParameters,
	ULONG                  ParameterCount
);

/* MinGW-w64 distributions, even those that declare VirtualAlloc2(),
 * lack it in import libraries, which results in a failure at link time.
 * Link it dynamically in such a case.
 */
static VirtualAlloc2_type VirtualAlloc2_ptr;

#ifdef RTE_TOOLCHAIN_GCC

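/* Placeholder-related flags are also missing from MinGW-w64 headers;
 * the values below match the Microsoft documentation for
 * VirtualAlloc2() and VirtualFreeEx().
 */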
#define MEM_COALESCE_PLACEHOLDERS 0x00000001
#define MEM_PRESERVE_PLACEHOLDER  0x00000002
#define MEM_REPLACE_PLACEHOLDER   0x00004000
#define MEM_RESERVE_PLACEHOLDER   0x00040000

int
eal_mem_win32api_init(void)
{
	/* Contrary to the docs, VirtualAlloc2() is not in kernel32.dll,
	 * see https://github.com/MicrosoftDocs/feedback/issues/1129.
	 */
	static const char library_name[] = "kernelbase.dll";
	static const char function[] = "VirtualAlloc2";

	HMODULE library = NULL;
	int ret = 0;

	/* Already done. */
	if (VirtualAlloc2_ptr != NULL)
		return 0;

	library = LoadLibraryA(library_name);
	if (library == NULL) {
		RTE_LOG_WIN32_ERR("LoadLibraryA(\"%s\")", library_name);
		return -1;
	}

	VirtualAlloc2_ptr = (VirtualAlloc2_type)(
		(void *)GetProcAddress(library, function));
	if (VirtualAlloc2_ptr == NULL) {
		RTE_LOG_WIN32_ERR("GetProcAddress(\"%s\", \"%s\")",
			library_name, function);

		/* Contrary to the docs, Server 2016 is not supported. */
		RTE_LOG(ERR, EAL, "Windows 10 or Windows Server 2019 "
			"is required for memory management\n");
		ret = -1;
	}

	FreeLibrary(library);

	return ret;
}

#else

/* Stub in case VirtualAlloc2() is provided by the toolchain. */
int
eal_mem_win32api_init(void)
{
	VirtualAlloc2_ptr = VirtualAlloc2;
	return 0;
}

#endif /* defined(RTE_TOOLCHAIN_GCC) */

static HANDLE virt2phys_device = INVALID_HANDLE_VALUE;

int
eal_mem_virt2iova_init(void)
{
	HDEVINFO list = INVALID_HANDLE_VALUE;
	SP_DEVICE_INTERFACE_DATA ifdata;
	SP_DEVICE_INTERFACE_DETAIL_DATA *detail = NULL;
	DWORD detail_size;
	int ret = -1;

	list = SetupDiGetClassDevs(
		&GUID_DEVINTERFACE_VIRT2PHYS, NULL, NULL,
		DIGCF_DEVICEINTERFACE | DIGCF_PRESENT);
	if (list == INVALID_HANDLE_VALUE) {
		RTE_LOG_WIN32_ERR("SetupDiGetClassDevs()");
		goto exit;
	}

	ifdata.cbSize = sizeof(ifdata);
	if (!SetupDiEnumDeviceInterfaces(
		list, NULL, &GUID_DEVINTERFACE_VIRT2PHYS, 0, &ifdata)) {
		RTE_LOG_WIN32_ERR("SetupDiEnumDeviceInterfaces()");
		goto exit;
	}

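	/* First query only the required size of the detail buffer;
	 * this call is expected to fail with ERROR_INSUFFICIENT_BUFFER.
	 */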
	if (!SetupDiGetDeviceInterfaceDetail(
		list, &ifdata, NULL, 0, &detail_size, NULL)) {
		if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
			RTE_LOG_WIN32_ERR(
				"SetupDiGetDeviceInterfaceDetail(probe)");
			goto exit;
		}
	}

	detail = malloc(detail_size);
	if (detail == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate virt2phys "
			"device interface detail data\n");
		goto exit;
	}

	detail->cbSize = sizeof(*detail);
	if (!SetupDiGetDeviceInterfaceDetail(
		list, &ifdata, detail, detail_size, NULL, NULL)) {
		RTE_LOG_WIN32_ERR("SetupDiGetDeviceInterfaceDetail(read)");
		goto exit;
	}

	RTE_LOG(DEBUG, EAL, "Found virt2phys device: %s\n", detail->DevicePath);

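	/* Keep the device open for the lifetime of the process:
	 * rte_mem_virt2phy() issues translation requests through this handle
	 * and eal_mem_virt2iova_cleanup() closes it.
	 */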
	virt2phys_device = CreateFile(
		detail->DevicePath, 0, 0, NULL, OPEN_EXISTING, 0, NULL);
	if (virt2phys_device == INVALID_HANDLE_VALUE) {
		RTE_LOG_WIN32_ERR("CreateFile()");
		goto exit;
	}

	/* Indicate success. */
	ret = 0;

exit:
	if (detail != NULL)
		free(detail);
	if (list != INVALID_HANDLE_VALUE)
		SetupDiDestroyDeviceInfoList(list);

	return ret;
}

void
eal_mem_virt2iova_cleanup(void)
{
	if (virt2phys_device != INVALID_HANDLE_VALUE)
		CloseHandle(virt2phys_device);
}

phys_addr_t
rte_mem_virt2phy(const void *virt)
{
	LARGE_INTEGER phys;
	DWORD bytes_returned;

	if (virt2phys_device == INVALID_HANDLE_VALUE)
		return RTE_BAD_PHYS_ADDR;

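	/* Ask the virt2phys driver to translate the address: the virtual
	 * address goes in, the corresponding physical address comes back
	 * as a 64-bit LARGE_INTEGER.
	 */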
	if (!DeviceIoControl(
			virt2phys_device, IOCTL_VIRT2PHYS_TRANSLATE,
			&virt, sizeof(virt), &phys, sizeof(phys),
			&bytes_returned, NULL)) {
		RTE_LOG_WIN32_ERR("DeviceIoControl(IOCTL_VIRT2PHYS_TRANSLATE)");
		return RTE_BAD_PHYS_ADDR;
	}

	return phys.QuadPart;
}

/* Windows currently only supports IOVA as PA. */
rte_iova_t
rte_mem_virt2iova(const void *virt)
{
	phys_addr_t phys;

	if (virt2phys_device == INVALID_HANDLE_VALUE)
		return RTE_BAD_IOVA;

	phys = rte_mem_virt2phy(virt);
	if (phys == RTE_BAD_PHYS_ADDR)
		return RTE_BAD_IOVA;

	return (rte_iova_t)phys;
}

/* Physical addresses are always used on Windows if they can be obtained. */
int
rte_eal_using_phys_addrs(void)
{
	return virt2phys_device != INVALID_HANDLE_VALUE;
}

/* Approximate error mapping from VirtualAlloc2() to POSIX mmap(3). */
static void
set_errno_from_win32_alloc_error(DWORD code)
{
	switch (code) {
	case ERROR_SUCCESS:
		rte_errno = 0;
		break;

	case ERROR_INVALID_ADDRESS:
		/* A valid requested address is not available. */
	case ERROR_COMMITMENT_LIMIT:
		/* May occur when committing regular memory. */
	case ERROR_NO_SYSTEM_RESOURCES:
		/* Occurs when the system runs out of hugepages. */
		rte_errno = ENOMEM;
		break;

	case ERROR_INVALID_PARAMETER:
	default:
		rte_errno = EINVAL;
		break;
	}
}

void *
eal_mem_reserve(void *requested_addr, size_t size, int flags)
{
	HANDLE process;
	void *virt;

	/* Windows requires hugepages to be committed. */
	if (flags & EAL_RESERVE_HUGEPAGES) {
		rte_errno = ENOTSUP;
		return NULL;
	}

	process = GetCurrentProcess();

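	/* Reserve a placeholder region: address space only, with no backing
	 * memory and no access rights. eal_mem_commit() and rte_mem_map()
	 * later convert pieces of it into usable memory.
	 */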
	virt = VirtualAlloc2_ptr(process, requested_addr, size,
		MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS,
		NULL, 0);
	if (virt == NULL) {
		DWORD err = GetLastError();
		RTE_LOG_WIN32_ERR("VirtualAlloc2()");
		set_errno_from_win32_alloc_error(err);
		return NULL;
	}

	if ((flags & EAL_RESERVE_FORCE_ADDRESS) && (virt != requested_addr)) {
		if (!VirtualFreeEx(process, virt, 0, MEM_RELEASE))
			RTE_LOG_WIN32_ERR("VirtualFreeEx()");
		rte_errno = ENOMEM;
		return NULL;
	}

	return virt;
}

void *
eal_mem_alloc_socket(size_t size, int socket_id)
{
	DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
	void *addr;

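	/* MEM_LARGE_PAGES makes this a hugepage allocation; per Win32 docs
	 * it requires the "Lock pages in memory" privilege for the caller.
	 */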
	addr = VirtualAllocExNuma(GetCurrentProcess(), NULL, size, flags,
		PAGE_READWRITE, eal_socket_numa_node(socket_id));
	if (addr == NULL)
		rte_errno = ENOMEM;
	return addr;
}

void *
eal_mem_commit(void *requested_addr, size_t size, int socket_id)
{
	HANDLE process;
	MEM_EXTENDED_PARAMETER param;
	DWORD param_count = 0;
	DWORD flags;
	void *addr;

	process = GetCurrentProcess();

	if (requested_addr != NULL) {
		MEMORY_BASIC_INFORMATION info;

		if (VirtualQueryEx(process, requested_addr, &info,
				sizeof(info)) != sizeof(info)) {
			RTE_LOG_WIN32_ERR("VirtualQueryEx(%p)", requested_addr);
			return NULL;
		}

		/* Split reserved region if only a part is committed. */
		flags = MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER;
		if ((info.RegionSize > size) && !VirtualFreeEx(
				process, requested_addr, size, flags)) {
			RTE_LOG_WIN32_ERR(
				"VirtualFreeEx(%p, %zu, preserve placeholder)",
				requested_addr, size);
			return NULL;
		}

		/* Temporarily release the region to be committed.
		 *
		 * There is an inherent race for this memory range
		 * if another thread allocates memory via OS API.
		 * However, VirtualAlloc2(MEM_REPLACE_PLACEHOLDER)
		 * doesn't work with MEM_LARGE_PAGES on Windows Server.
		 */
		if (!VirtualFreeEx(process, requested_addr, 0, MEM_RELEASE)) {
			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)",
				requested_addr);
			return NULL;
		}
	}

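	/* Bind the allocation to the NUMA node mapped from socket_id,
	 * unless the caller accepts any socket.
	 */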
	if (socket_id != SOCKET_ID_ANY) {
		param_count = 1;
		memset(&param, 0, sizeof(param));
		param.Type = MemExtendedParameterNumaNode;
		param.ULong = eal_socket_numa_node(socket_id);
	}

	flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
	addr = VirtualAlloc2_ptr(process, requested_addr, size,
		flags, PAGE_READWRITE, &param, param_count);
	if (addr == NULL) {
		/* Logging may overwrite GetLastError() result. */
		DWORD err = GetLastError();
		RTE_LOG_WIN32_ERR("VirtualAlloc2(%p, %zu, commit large pages)",
			requested_addr, size);
		set_errno_from_win32_alloc_error(err);
		return NULL;
	}

	if ((requested_addr != NULL) && (addr != requested_addr)) {
		/* We lost the race for the requested_addr. */
		if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE))
			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, release)", addr);

		rte_errno = EADDRNOTAVAIL;
		return NULL;
	}

	return addr;
}

int
eal_mem_decommit(void *addr, size_t size)
{
	HANDLE process;
	void *stub;
	DWORD flags;

	process = GetCurrentProcess();

	/* Hugepages cannot be decommitted on Windows,
	 * so free them and replace the block with a placeholder.
	 * There is a race for the VA in this block until the
	 * VirtualAlloc2() call below.
	 */
	if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
		RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)", addr);
		return -1;
	}

	flags = MEM_RESERVE | MEM_RESERVE_PLACEHOLDER;
	stub = VirtualAlloc2_ptr(
		process, addr, size, flags, PAGE_NOACCESS, NULL, 0);
	if (stub == NULL) {
		/* We lost the race for the VA: the range was already released
		 * above and is now owned by another allocation,
		 * so there is nothing left to free here.
		 */
		rte_errno = EADDRNOTAVAIL;
		return -1;
	}

	/* No need to join reserved regions adjacent to the freed one:
	 * eal_mem_commit() will just pick up the page-size placeholder
	 * created here.
	 */
	return 0;
}

/**
 * Free a reserved memory region in full or in part.
 *
 * @param addr
 *  Starting address of the area to free.
 * @param size
 *  Number of bytes to free. Must be a multiple of page size.
 * @param reserved
 *  Fail if the region is not in reserved state.
 * @return
 *  * 0 on successful deallocation;
 *  * 1 if region must be in reserved state but it is not;
 *  * (-1) on system API failures.
 */
static int
mem_free(void *addr, size_t size, bool reserved)
{
	MEMORY_BASIC_INFORMATION info;
	HANDLE process;

	process = GetCurrentProcess();

	if (VirtualQueryEx(
			process, addr, &info, sizeof(info)) != sizeof(info)) {
		RTE_LOG_WIN32_ERR("VirtualQueryEx(%p)", addr);
		return -1;
	}

	if (reserved && (info.State != MEM_RESERVE))
		return 1;

	/* Free complete region. */
	if ((addr == info.AllocationBase) && (size == info.RegionSize)) {
		if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)",
				addr);
		}
		return 0;
	}

	/* Split the part to be freed and the remaining reservation. */
	if (!VirtualFreeEx(process, addr, size,
			MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
		RTE_LOG_WIN32_ERR(
			"VirtualFreeEx(%p, %zu, preserve placeholder)",
			addr, size);
		return -1;
	}

	/* Actually free reservation part. */
	if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
		RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)", addr);
		return -1;
	}

	return 0;
}

void
eal_mem_free(void *virt, size_t size)
{
	mem_free(virt, size, false);
}

int
eal_mem_set_dump(void *virt, size_t size, bool dump)
{
	RTE_SET_USED(virt);
	RTE_SET_USED(size);
	RTE_SET_USED(dump);

	/* Windows does not dump reserved memory by default.
	 *
	 * There is <werapi.h> to include or exclude regions from the dump,
	 * but this is not currently required by EAL.
	 */

	rte_errno = ENOTSUP;
	return -1;
}

void *
rte_mem_map(void *requested_addr, size_t size, int prot, int flags,
	int fd, uint64_t offset)
{
	HANDLE file_handle = INVALID_HANDLE_VALUE;
	HANDLE mapping_handle = INVALID_HANDLE_VALUE;
	DWORD sys_prot = 0;
	DWORD sys_access = 0;
	DWORD size_high = (DWORD)(size >> 32);
	DWORD size_low = (DWORD)size;
	DWORD offset_high = (DWORD)(offset >> 32);
	DWORD offset_low = (DWORD)offset;
	LPVOID virt = NULL;

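	/* Translate RTE_PROT_* into a Win32 page protection constant and
	 * the matching FILE_MAP_* access rights for the view.
	 */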
	if (prot & RTE_PROT_EXECUTE) {
		if (prot & RTE_PROT_READ) {
			sys_prot = PAGE_EXECUTE_READ;
			sys_access = FILE_MAP_READ | FILE_MAP_EXECUTE;
		}
		if (prot & RTE_PROT_WRITE) {
			sys_prot = PAGE_EXECUTE_READWRITE;
			sys_access = FILE_MAP_WRITE | FILE_MAP_EXECUTE;
		}
	} else {
		if (prot & RTE_PROT_READ) {
			sys_prot = PAGE_READONLY;
			sys_access = FILE_MAP_READ;
		}
		if (prot & RTE_PROT_WRITE) {
			sys_prot = PAGE_READWRITE;
			sys_access = FILE_MAP_WRITE;
		}
	}

	if (flags & RTE_MAP_PRIVATE)
		sys_access |= FILE_MAP_COPY;

	if ((flags & RTE_MAP_ANONYMOUS) == 0)
		file_handle = (HANDLE)_get_osfhandle(fd);

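	/* With INVALID_HANDLE_VALUE as the file handle the section is backed
	 * by the paging file, which is how anonymous mappings are made.
	 */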
	mapping_handle = CreateFileMapping(
		file_handle, NULL, sys_prot, size_high, size_low, NULL);
	/* CreateFileMapping() returns NULL on failure,
	 * not INVALID_HANDLE_VALUE.
	 */
	if (mapping_handle == NULL) {
		RTE_LOG_WIN32_ERR("CreateFileMapping()");
		return NULL;
	}

	/* There is a race for the requested_addr between mem_free()
	 * and MapViewOfFileEx(). MapViewOfFile3() can replace a reserved
	 * region with a mapping in a single operation, but it does not
	 * support private mappings.
	 */
	if (requested_addr != NULL) {
		int ret = mem_free(requested_addr, size, true);
		if (ret) {
			if (ret > 0) {
				RTE_LOG(ERR, EAL, "Cannot map memory "
					"to a region not reserved\n");
				rte_errno = EADDRNOTAVAIL;
			}
			if (!CloseHandle(mapping_handle))
				RTE_LOG_WIN32_ERR("CloseHandle()");
			return NULL;
		}
	}

	virt = MapViewOfFileEx(mapping_handle, sys_access,
		offset_high, offset_low, size, requested_addr);
	if (virt == NULL) {
		RTE_LOG_WIN32_ERR("MapViewOfFileEx()");
		if (!CloseHandle(mapping_handle))
			RTE_LOG_WIN32_ERR("CloseHandle()");
		return NULL;
	}

	if ((flags & RTE_MAP_FORCE_ADDRESS) && (virt != requested_addr)) {
		if (!UnmapViewOfFile(virt))
			RTE_LOG_WIN32_ERR("UnmapViewOfFile()");
		virt = NULL;
	}

	if (!CloseHandle(mapping_handle))
		RTE_LOG_WIN32_ERR("CloseHandle()");

	return virt;
}

int
rte_mem_unmap(void *virt, size_t size)
{
	RTE_SET_USED(size);

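	/* UnmapViewOfFile() always removes the entire view,
	 * so the size argument is not needed.
	 */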
	if (!UnmapViewOfFile(virt)) {
		RTE_LOG_WIN32_ERR("UnmapViewOfFile()");
		rte_errno = EINVAL;
		return -1;
	}
	return 0;
}

uint64_t
eal_get_baseaddr(void)
{
	/* The Windows strategy for memory allocation is undocumented.
	 * Returning 0 here effectively disables address guessing
	 * unless the user provides an address hint.
	 */
	return 0;
}

size_t
rte_mem_page_size(void)
{
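	/* Cache the value in a static variable:
	 * the system page size cannot change while the process runs.
	 */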
	static SYSTEM_INFO info;

	if (info.dwPageSize == 0)
		GetSystemInfo(&info);

	return info.dwPageSize;
}

int
rte_mem_lock(const void *virt, size_t size)
{
	/* VirtualLock() takes a non-const pointer; cast away const. */
	void *addr = (void *)((uintptr_t)virt);

	if (!VirtualLock(addr, size)) {
		RTE_LOG_WIN32_ERR("VirtualLock(%p %#zx)", virt, size);
		return -1;
	}

	return 0;
}

int
rte_eal_memseg_init(void)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		EAL_LOG_NOT_IMPLEMENTED();
		return -1;
	}

	return eal_dynmem_memseg_lists_init();
}

static int
eal_nohuge_init(void)
{
	struct rte_mem_config *mcfg;
	struct rte_memseg_list *msl;
	int n_segs;
	uint64_t mem_sz, page_sz;
	void *addr;

	mcfg = rte_eal_get_configuration()->mem_config;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* nohuge mode is legacy mode */
	internal_conf->legacy_mem = 1;

	msl = &mcfg->memsegs[0];

	mem_sz = internal_conf->memory;
	page_sz = RTE_PGSIZE_4K;
	n_segs = mem_sz / page_sz;

	if (eal_memseg_list_init_named(
			msl, "nohugemem", page_sz, n_segs, 0, true)) {
		return -1;
	}

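	/* Back the whole memseg list with one committed region of regular
	 * 4K pages; hugepages are not used in this mode.
	 */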
	addr = VirtualAlloc(
		NULL, mem_sz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
	if (addr == NULL) {
		RTE_LOG_WIN32_ERR("VirtualAlloc(size=%#zx)", mem_sz);
		RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
		return -1;
	}

	msl->base_va = addr;
	msl->len = mem_sz;

	eal_memseg_list_populate(msl, addr, n_segs);

	if (mcfg->dma_maskbits &&
		rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
		RTE_LOG(ERR, EAL,
			"%s(): couldn't allocate memory due to IOVA "
			"exceeding limits of current DMA mask.\n", __func__);
		return -1;
	}

	return 0;
}

int
rte_eal_hugepage_init(void)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	return internal_conf->no_hugetlbfs ?
		eal_nohuge_init() : eal_dynmem_hugepage_init();
}

int
rte_eal_hugepage_attach(void)
{
	EAL_LOG_NOT_IMPLEMENTED();
	return -1;
}