xref: /dpdk/lib/eal/windows/eal_memory.c (revision ae67895b507bb6af22263c79ba0d5c374b396485)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2020 Dmitry Kozlyuk
3  */
4 
5 #include <inttypes.h>
6 #include <io.h>
7 
8 #include <rte_eal_paging.h>
9 #include <rte_errno.h>
10 
11 #include "eal_internal_cfg.h"
12 #include "eal_memalloc.h"
13 #include "eal_memcfg.h"
14 #include "eal_options.h"
15 #include "eal_private.h"
16 #include "eal_windows.h"
17 
18 #include <rte_virt2phys.h>
19 
20 /* MinGW-w64 headers lack VirtualAlloc2() in some distributions.
21  * Note: definitions are copied verbatim from Microsoft documentation
22  * and don't follow DPDK code style.
23  */
#ifndef MEM_EXTENDED_PARAMETER_TYPE_BITS

/* Number of bits in MEM_EXTENDED_PARAMETER.Type bit-field (per winnt.h). */
#define MEM_EXTENDED_PARAMETER_TYPE_BITS 4

/* https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-mem_extended_parameter_type */
typedef enum MEM_EXTENDED_PARAMETER_TYPE {
	MemExtendedParameterInvalidType,
	MemExtendedParameterAddressRequirements,
	MemExtendedParameterNumaNode,
	MemExtendedParameterPartitionHandle,
	MemExtendedParameterUserPhysicalHandle,
	MemExtendedParameterAttributeFlags,
	MemExtendedParameterMax
} *PMEM_EXTENDED_PARAMETER_TYPE;

/* https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-mem_extended_parameter */
typedef struct MEM_EXTENDED_PARAMETER {
	struct {
		DWORD64 Type : MEM_EXTENDED_PARAMETER_TYPE_BITS;
		DWORD64 Reserved : 64 - MEM_EXTENDED_PARAMETER_TYPE_BITS;
	} DUMMYSTRUCTNAME;
	union {
		DWORD64 ULong64;
		PVOID   Pointer;
		SIZE_T  Size;
		HANDLE  Handle;
		DWORD   ULong;
	} DUMMYUNIONNAME;
} MEM_EXTENDED_PARAMETER, *PMEM_EXTENDED_PARAMETER;

#endif /* defined(MEM_EXTENDED_PARAMETER_TYPE_BITS) */
55 
/* https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc2 */
typedef PVOID (*VirtualAlloc2_type)(
	HANDLE                 Process,
	PVOID                  BaseAddress,
	SIZE_T                 Size,
	ULONG                  AllocationType,
	ULONG                  PageProtection,
	MEM_EXTENDED_PARAMETER *ExtendedParameters,
	ULONG                  ParameterCount
);

/* MinGW-w64 distributions, even those that declare VirtualAlloc2(),
 * lack it in import libraries, which results in a failure at link time.
 * Link it dynamically in such case.
 * Resolved by eal_mem_win32api_init(); NULL until that succeeds.
 */
static VirtualAlloc2_type VirtualAlloc2_ptr;
72 
73 #ifdef RTE_TOOLCHAIN_GCC
74 
/* Placeholder-related flag values from <memoryapi.h>, provided here for
 * MinGW-w64 headers that predate them. Values match Microsoft documentation.
 */
#ifndef MEM_COALESCE_PLACEHOLDERS
#define MEM_COALESCE_PLACEHOLDERS 0x00000001
#endif
#ifndef MEM_PRESERVE_PLACEHOLDER
#define MEM_PRESERVE_PLACEHOLDER  0x00000002
#endif
#ifndef MEM_REPLACE_PLACEHOLDER
#define MEM_REPLACE_PLACEHOLDER   0x00004000
#endif
#ifndef MEM_RESERVE_PLACEHOLDER
#define MEM_RESERVE_PLACEHOLDER   0x00040000
#endif
87 
88 int
eal_mem_win32api_init(void)89 eal_mem_win32api_init(void)
90 {
91 	/* Contrary to the docs, VirtualAlloc2() is not in kernel32.dll,
92 	 * see https://github.com/MicrosoftDocs/feedback/issues/1129.
93 	 */
94 	static const char library_name[] = "kernelbase.dll";
95 	static const char function[] = "VirtualAlloc2";
96 
97 	HMODULE library = NULL;
98 	int ret = 0;
99 
100 	/* Already done. */
101 	if (VirtualAlloc2_ptr != NULL)
102 		return 0;
103 
104 	library = LoadLibraryA(library_name);
105 	if (library == NULL) {
106 		RTE_LOG_WIN32_ERR("LoadLibraryA(\"%s\")", library_name);
107 		return -1;
108 	}
109 
110 	VirtualAlloc2_ptr = (VirtualAlloc2_type)(
111 		(void *)GetProcAddress(library, function));
112 	if (VirtualAlloc2_ptr == NULL) {
113 		RTE_LOG_WIN32_ERR("GetProcAddress(\"%s\", \"%s\")",
114 			library_name, function);
115 
116 		/* Contrary to the docs, Server 2016 is not supported. */
117 		EAL_LOG(ERR, "Windows 10 or Windows Server 2019 "
118 			" is required for memory management");
119 		ret = -1;
120 	}
121 
122 	FreeLibrary(library);
123 
124 	return ret;
125 }
126 
127 #else
128 
/* Stub in case VirtualAlloc2() is provided by the toolchain. */
int
eal_mem_win32api_init(void)
{
	/* Non-GCC toolchains link VirtualAlloc2() statically;
	 * just record its address for the common call sites.
	 */
	VirtualAlloc2_ptr = VirtualAlloc2;
	return 0;
}
136 
137 #endif /* defined(RTE_TOOLCHAIN_GCC) */
138 
/* Handle to the virt2phys kernel driver; INVALID_HANDLE_VALUE when
 * the driver is absent (then only IOVA-as-VA mode works).
 */
static HANDLE virt2phys_device = INVALID_HANDLE_VALUE;

/* Locate and open the virt2phys driver device interface.
 * Returns 0 on success, -1 if the driver cannot be found or opened.
 */
int
eal_mem_virt2iova_init(void)
{
	HDEVINFO list = INVALID_HANDLE_VALUE;
	SP_DEVICE_INTERFACE_DATA ifdata;
	SP_DEVICE_INTERFACE_DETAIL_DATA *detail = NULL;
	DWORD detail_size;
	int ret = -1;

	/* Enumerate present devices exposing the virt2phys interface GUID. */
	list = SetupDiGetClassDevs(
		&GUID_DEVINTERFACE_VIRT2PHYS, NULL, NULL,
		DIGCF_DEVICEINTERFACE | DIGCF_PRESENT);
	if (list == INVALID_HANDLE_VALUE) {
		RTE_LOG_WIN32_ERR("SetupDiGetClassDevs()");
		goto exit;
	}

	ifdata.cbSize = sizeof(ifdata);
	if (!SetupDiEnumDeviceInterfaces(
		list, NULL, &GUID_DEVINTERFACE_VIRT2PHYS, 0, &ifdata)) {
		RTE_LOG_WIN32_ERR("SetupDiEnumDeviceInterfaces()");
		goto exit;
	}

	/* Probe call with a NULL buffer: expected to fail with
	 * ERROR_INSUFFICIENT_BUFFER while filling detail_size.
	 */
	if (!SetupDiGetDeviceInterfaceDetail(
		list, &ifdata, NULL, 0, &detail_size, NULL)) {
		if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
			RTE_LOG_WIN32_ERR(
				"SetupDiGetDeviceInterfaceDetail(probe)");
			goto exit;
		}
	}

	detail = malloc(detail_size);
	if (detail == NULL) {
		EAL_LOG(ERR, "Cannot allocate virt2phys "
			"device interface detail data");
		goto exit;
	}

	/* cbSize must be the fixed struct size, not detail_size (per API). */
	detail->cbSize = sizeof(*detail);
	if (!SetupDiGetDeviceInterfaceDetail(
		list, &ifdata, detail, detail_size, NULL, NULL)) {
		RTE_LOG_WIN32_ERR("SetupDiGetDeviceInterfaceDetail(read)");
		goto exit;
	}

	EAL_LOG(DEBUG, "Found virt2phys device: %s", detail->DevicePath);

	virt2phys_device = CreateFile(
		detail->DevicePath, 0, 0, NULL, OPEN_EXISTING, 0, NULL);
	if (virt2phys_device == INVALID_HANDLE_VALUE) {
		RTE_LOG_WIN32_ERR("CreateFile()");
		goto exit;
	}

	/* Indicate success. */
	ret = 0;

exit:
	free(detail);
	if (list != INVALID_HANDLE_VALUE)
		SetupDiDestroyDeviceInfoList(list);

	return ret;
}
207 
208 void
eal_mem_virt2iova_cleanup(void)209 eal_mem_virt2iova_cleanup(void)
210 {
211 	if (virt2phys_device != INVALID_HANDLE_VALUE)
212 		CloseHandle(virt2phys_device);
213 }
214 
215 phys_addr_t
rte_mem_virt2phy(const void * virt)216 rte_mem_virt2phy(const void *virt)
217 {
218 	LARGE_INTEGER phys;
219 	DWORD bytes_returned;
220 
221 	if (virt2phys_device == INVALID_HANDLE_VALUE)
222 		return RTE_BAD_PHYS_ADDR;
223 
224 	if (!DeviceIoControl(
225 			virt2phys_device, IOCTL_VIRT2PHYS_TRANSLATE,
226 			&virt, sizeof(virt), &phys, sizeof(phys),
227 			&bytes_returned, NULL)) {
228 		RTE_LOG_WIN32_ERR("DeviceIoControl(IOCTL_VIRT2PHYS_TRANSLATE)");
229 		return RTE_BAD_PHYS_ADDR;
230 	}
231 
232 	return phys.QuadPart;
233 }
234 
235 rte_iova_t
rte_mem_virt2iova(const void * virt)236 rte_mem_virt2iova(const void *virt)
237 {
238 	phys_addr_t phys;
239 
240 	if (rte_eal_iova_mode() == RTE_IOVA_VA)
241 		return (rte_iova_t)virt;
242 
243 	phys = rte_mem_virt2phy(virt);
244 	if (phys == RTE_BAD_PHYS_ADDR)
245 		return RTE_BAD_IOVA;
246 	return (rte_iova_t)phys;
247 }
248 
249 /* Always using physical addresses under Windows if they can be obtained. */
250 int
rte_eal_using_phys_addrs(void)251 rte_eal_using_phys_addrs(void)
252 {
253 	return virt2phys_device != INVALID_HANDLE_VALUE;
254 }
255 
256 /* Approximate error mapping from VirtualAlloc2() to POSIX mmap(3). */
257 static void
set_errno_from_win32_alloc_error(DWORD code)258 set_errno_from_win32_alloc_error(DWORD code)
259 {
260 	switch (code) {
261 	case ERROR_SUCCESS:
262 		rte_errno = 0;
263 		break;
264 
265 	case ERROR_INVALID_ADDRESS:
266 		/* A valid requested address is not available. */
267 	case ERROR_COMMITMENT_LIMIT:
268 		/* May occur when committing regular memory. */
269 	case ERROR_NO_SYSTEM_RESOURCES:
270 		/* Occurs when the system runs out of hugepages. */
271 		rte_errno = ENOMEM;
272 		break;
273 
274 	case ERROR_INVALID_PARAMETER:
275 	default:
276 		rte_errno = EINVAL;
277 		break;
278 	}
279 }
280 
281 void *
eal_mem_reserve(void * requested_addr,size_t size,int flags)282 eal_mem_reserve(void *requested_addr, size_t size, int flags)
283 {
284 	HANDLE process;
285 	void *virt;
286 
287 	/* Windows requires hugepages to be committed. */
288 	if (flags & EAL_RESERVE_HUGEPAGES) {
289 		rte_errno = ENOTSUP;
290 		return NULL;
291 	}
292 
293 	process = GetCurrentProcess();
294 
295 	virt = VirtualAlloc2_ptr(process, requested_addr, size,
296 		MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS,
297 		NULL, 0);
298 	if (virt == NULL) {
299 		DWORD err = GetLastError();
300 		RTE_LOG_WIN32_ERR("VirtualAlloc2()");
301 		set_errno_from_win32_alloc_error(err);
302 		return NULL;
303 	}
304 
305 	if ((flags & EAL_RESERVE_FORCE_ADDRESS) && (virt != requested_addr)) {
306 		if (!VirtualFreeEx(process, virt, 0, MEM_RELEASE))
307 			RTE_LOG_WIN32_ERR("VirtualFreeEx()");
308 		rte_errno = ENOMEM;
309 		return NULL;
310 	}
311 
312 	return virt;
313 }
314 
315 void *
eal_mem_alloc_socket(size_t size,int socket_id)316 eal_mem_alloc_socket(size_t size, int socket_id)
317 {
318 	DWORD flags = MEM_RESERVE | MEM_COMMIT;
319 	void *addr;
320 
321 	flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
322 	addr = VirtualAllocExNuma(GetCurrentProcess(), NULL, size, flags,
323 		PAGE_READWRITE, eal_socket_numa_node(socket_id));
324 	if (addr == NULL)
325 		rte_errno = ENOMEM;
326 	return addr;
327 }
328 
/* Commit hugepage-backed memory, optionally at a previously reserved
 * placeholder address and bound to a NUMA node.
 * Returns the committed address, or NULL with rte_errno set
 * (EADDRNOTAVAIL if the requested address was lost to a race).
 */
void *
eal_mem_commit(void *requested_addr, size_t size, int socket_id)
{
	HANDLE process;
	MEM_EXTENDED_PARAMETER param;
	DWORD param_count = 0;
	DWORD flags;
	void *addr;

	process = GetCurrentProcess();

	if (requested_addr != NULL) {
		MEMORY_BASIC_INFORMATION info;

		if (VirtualQueryEx(process, requested_addr, &info,
				sizeof(info)) != sizeof(info)) {
			RTE_LOG_WIN32_ERR("VirtualQuery(%p)", requested_addr);
			return NULL;
		}

		/* Split reserved region if only a part is committed. */
		flags = MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER;
		if ((info.RegionSize > size) && !VirtualFreeEx(
				process, requested_addr, size, flags)) {
			RTE_LOG_WIN32_ERR(
				"VirtualFreeEx(%p, %zu, preserve placeholder)",
				requested_addr, size);
			return NULL;
		}

		/* Temporarily release the region to be committed.
		 *
		 * There is an inherent race for this memory range
		 * if another thread allocates memory via OS API.
		 * However, VirtualAlloc2(MEM_REPLACE_PLACEHOLDER)
		 * doesn't work with MEM_LARGE_PAGES on Windows Server.
		 */
		if (!VirtualFreeEx(process, requested_addr, 0, MEM_RELEASE)) {
			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)",
				requested_addr);
			return NULL;
		}
	}

	/* Pass the NUMA node as an extended parameter when requested. */
	if (socket_id != SOCKET_ID_ANY) {
		param_count = 1;
		memset(&param, 0, sizeof(param));
		param.Type = MemExtendedParameterNumaNode;
		param.ULong = eal_socket_numa_node(socket_id);
	}

	flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
	addr = VirtualAlloc2_ptr(process, requested_addr, size,
		flags, PAGE_READWRITE, &param, param_count);
	if (addr == NULL) {
		/* Logging may overwrite GetLastError() result. */
		DWORD err = GetLastError();
		RTE_LOG_WIN32_ERR("VirtualAlloc2(%p, %zu, commit large pages)",
			requested_addr, size);
		set_errno_from_win32_alloc_error(err);
		return NULL;
	}

	if ((requested_addr != NULL) && (addr != requested_addr)) {
		/* We lost the race for the requested_addr. */
		if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE))
			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, release)", addr);

		rte_errno = EADDRNOTAVAIL;
		return NULL;
	}

	return addr;
}
403 
/* Release a committed hugepage block and replace it with a placeholder
 * reservation, approximating POSIX decommit semantics.
 * Returns 0 on success, -1 with rte_errno set on failure.
 */
int
eal_mem_decommit(void *addr, size_t size)
{
	HANDLE process;
	void *stub;
	DWORD flags;

	process = GetCurrentProcess();

	/* Hugepages cannot be decommited on Windows,
	 * so free them and replace the block with a placeholder.
	 * There is a race for VA in this block until VirtualAlloc2 call.
	 */
	if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
		RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)", addr);
		return -1;
	}

	flags = MEM_RESERVE | MEM_RESERVE_PLACEHOLDER;
	stub = VirtualAlloc2_ptr(
		process, addr, size, flags, PAGE_NOACCESS, NULL, 0);
	if (stub == NULL) {
		/* We lost the race for the VA.
		 * NOTE(review): stub is NULL here, so this VirtualFreeEx()
		 * call can only fail and log; it looks like it was meant to
		 * operate on addr — confirm against upstream intent.
		 */
		if (!VirtualFreeEx(process, stub, 0, MEM_RELEASE))
			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, release)", stub);
		rte_errno = EADDRNOTAVAIL;
		return -1;
	}

	/* No need to join reserved regions adjacent to the freed one:
	 * eal_mem_commit() will just pick up the page-size placeholder
	 * created here.
	 */
	return 0;
}
439 
440 /**
441  * Free a reserved memory region in full or in part.
442  *
443  * @param addr
444  *  Starting address of the area to free.
445  * @param size
446  *  Number of bytes to free. Must be a multiple of page size.
447  * @param reserved
448  *  Fail if the region is not in reserved state.
449  * @return
450  *  * 0 on successful deallocation;
451  *  * 1 if region must be in reserved state but it is not;
452  *  * (-1) on system API failures.
453  */
454 static int
mem_free(void * addr,size_t size,bool reserved)455 mem_free(void *addr, size_t size, bool reserved)
456 {
457 	MEMORY_BASIC_INFORMATION info;
458 	HANDLE process;
459 
460 	process = GetCurrentProcess();
461 
462 	if (VirtualQueryEx(
463 			process, addr, &info, sizeof(info)) != sizeof(info)) {
464 		RTE_LOG_WIN32_ERR("VirtualQueryEx(%p)", addr);
465 		return -1;
466 	}
467 
468 	if (reserved && (info.State != MEM_RESERVE))
469 		return 1;
470 
471 	/* Free complete region. */
472 	if ((addr == info.AllocationBase) && (size == info.RegionSize)) {
473 		if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
474 			RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)",
475 				addr);
476 		}
477 		return 0;
478 	}
479 
480 	/* Split the part to be freed and the remaining reservation. */
481 	if (!VirtualFreeEx(process, addr, size,
482 			MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
483 		RTE_LOG_WIN32_ERR(
484 			"VirtualFreeEx(%p, %zu, preserve placeholder)",
485 			addr, size);
486 		return -1;
487 	}
488 
489 	/* Actually free reservation part. */
490 	if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
491 		RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)", addr);
492 		return -1;
493 	}
494 
495 	return 0;
496 }
497 
498 void
eal_mem_free(void * virt,size_t size)499 eal_mem_free(void *virt, size_t size)
500 {
501 	mem_free(virt, size, false);
502 }
503 
/* Stub: including/excluding memory from crash dumps is not implemented.
 * Always fails with ENOTSUP.
 */
int
eal_mem_set_dump(void *virt, size_t size, bool dump)
{
	RTE_SET_USED(virt);
	RTE_SET_USED(size);
	RTE_SET_USED(dump);

	/* Windows does not dump reserved memory by default.
	 *
	 * There is <werapi.h> to include or exclude regions from the dump,
	 * but this is not currently required by EAL.
	 */

	rte_errno = ENOTSUP;
	return -1;
}
520 
521 void *
rte_mem_map(void * requested_addr,size_t size,int prot,int flags,int fd,uint64_t offset)522 rte_mem_map(void *requested_addr, size_t size, int prot, int flags,
523 	int fd, uint64_t offset)
524 {
525 	HANDLE file_handle = INVALID_HANDLE_VALUE;
526 	HANDLE mapping_handle = INVALID_HANDLE_VALUE;
527 	DWORD sys_prot = 0;
528 	DWORD sys_access = 0;
529 	DWORD size_high = (DWORD)(size >> 32);
530 	DWORD size_low = (DWORD)size;
531 	DWORD offset_high = (DWORD)(offset >> 32);
532 	DWORD offset_low = (DWORD)offset;
533 	LPVOID virt = NULL;
534 
535 	if (prot & RTE_PROT_EXECUTE) {
536 		if (prot & RTE_PROT_READ) {
537 			sys_prot = PAGE_EXECUTE_READ;
538 			sys_access = FILE_MAP_READ | FILE_MAP_EXECUTE;
539 		}
540 		if (prot & RTE_PROT_WRITE) {
541 			sys_prot = PAGE_EXECUTE_READWRITE;
542 			sys_access = FILE_MAP_WRITE | FILE_MAP_EXECUTE;
543 		}
544 	} else {
545 		if (prot & RTE_PROT_READ) {
546 			sys_prot = PAGE_READONLY;
547 			sys_access = FILE_MAP_READ;
548 		}
549 		if (prot & RTE_PROT_WRITE) {
550 			sys_prot = PAGE_READWRITE;
551 			sys_access = FILE_MAP_WRITE;
552 		}
553 	}
554 
555 	if (flags & RTE_MAP_PRIVATE)
556 		sys_access |= FILE_MAP_COPY;
557 
558 	if ((flags & RTE_MAP_ANONYMOUS) == 0)
559 		file_handle = (HANDLE)_get_osfhandle(fd);
560 
561 	mapping_handle = CreateFileMapping(
562 		file_handle, NULL, sys_prot, size_high, size_low, NULL);
563 	if (mapping_handle == INVALID_HANDLE_VALUE) {
564 		RTE_LOG_WIN32_ERR("CreateFileMapping()");
565 		return NULL;
566 	}
567 
568 	/* There is a race for the requested_addr between mem_free()
569 	 * and MapViewOfFileEx(). MapViewOfFile3() that can replace a reserved
570 	 * region with a mapping in a single operation, but it does not support
571 	 * private mappings.
572 	 */
573 	if (requested_addr != NULL) {
574 		int ret = mem_free(requested_addr, size, true);
575 		if (ret) {
576 			if (ret > 0) {
577 				EAL_LOG(ERR, "Cannot map memory "
578 					"to a region not reserved");
579 				rte_errno = EADDRNOTAVAIL;
580 			}
581 			return NULL;
582 		}
583 	}
584 
585 	virt = MapViewOfFileEx(mapping_handle, sys_access,
586 		offset_high, offset_low, size, requested_addr);
587 	if (!virt) {
588 		RTE_LOG_WIN32_ERR("MapViewOfFileEx()");
589 		return NULL;
590 	}
591 
592 	if ((flags & RTE_MAP_FORCE_ADDRESS) && (virt != requested_addr)) {
593 		if (!UnmapViewOfFile(virt))
594 			RTE_LOG_WIN32_ERR("UnmapViewOfFile()");
595 		virt = NULL;
596 	}
597 
598 	if (!CloseHandle(mapping_handle))
599 		RTE_LOG_WIN32_ERR("CloseHandle()");
600 
601 	return virt;
602 }
603 
604 int
rte_mem_unmap(void * virt,size_t size)605 rte_mem_unmap(void *virt, size_t size)
606 {
607 	RTE_SET_USED(size);
608 
609 	if (!UnmapViewOfFile(virt)) {
610 		RTE_LOG_WIN32_ERR("UnmapViewOfFile()");
611 		rte_errno = EINVAL;
612 		return -1;
613 	}
614 	return 0;
615 }
616 
/* Base virtual address hint for EAL memory layout. */
uint64_t
eal_get_baseaddr(void)
{
	/* Windows strategy for memory allocation is undocumented.
	 * Returning 0 here effectively disables address guessing
	 * unless user provides an address hint.
	 */
	return 0;
}
626 
/* Return the system page size, caching GetSystemInfo() on first call.
 * NOTE(review): the static cache is not thread-safe on first use;
 * concurrent callers would just repeat the idempotent query.
 */
size_t
rte_mem_page_size(void)
{
	static SYSTEM_INFO info;

	/* dwPageSize == 0 means the cache has not been filled yet. */
	if (info.dwPageSize == 0)
		GetSystemInfo(&info);

	return info.dwPageSize;
}
637 
638 int
rte_mem_lock(const void * virt,size_t size)639 rte_mem_lock(const void *virt, size_t size)
640 {
641 	/* VirtualLock() takes `void*`, work around compiler warning. */
642 	void *addr = (void *)((uintptr_t)virt);
643 
644 	if (!VirtualLock(addr, size)) {
645 		RTE_LOG_WIN32_ERR("VirtualLock(%p %#zx)", virt, size);
646 		return -1;
647 	}
648 
649 	return 0;
650 }
651 
652 int
rte_eal_memseg_init(void)653 rte_eal_memseg_init(void)
654 {
655 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
656 		EAL_LOG_NOT_IMPLEMENTED();
657 		return -1;
658 	}
659 
660 	return eal_dynmem_memseg_lists_init();
661 }
662 
/* Legacy no-hugepage initialization: back the first memseg list with one
 * VirtualAlloc'd block of 4K pages sized by --memory/-m configuration.
 * Returns 0 on success, -1 on failure.
 */
static int
eal_nohuge_init(void)
{
	struct rte_mem_config *mcfg;
	struct rte_memseg_list *msl;
	int n_segs;
	uint64_t mem_sz, page_sz;
	void *addr;

	mcfg = rte_eal_get_configuration()->mem_config;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* nohuge mode is legacy mode */
	internal_conf->legacy_mem = 1;

	msl = &mcfg->memsegs[0];

	mem_sz = internal_conf->memory;
	page_sz = RTE_PGSIZE_4K;
	n_segs = mem_sz / page_sz;

	if (eal_memseg_list_init_named(
			msl, "nohugemem", page_sz, n_segs, 0, true)) {
		return -1;
	}

	addr = VirtualAlloc(
		NULL, mem_sz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
	if (addr == NULL) {
		RTE_LOG_WIN32_ERR("VirtualAlloc(size=%#zx)", mem_sz);
		EAL_LOG(ERR, "Cannot allocate memory");
		return -1;
	}

	msl->base_va = addr;
	msl->len = mem_sz;

	eal_memseg_list_populate(msl, addr, n_segs);

	/* NOTE(review): on this failure path the VirtualAlloc'd block is not
	 * released; EAL init failure typically aborts the process, but
	 * confirm whether an explicit VirtualFree is wanted here.
	 */
	if (mcfg->dma_maskbits &&
		rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
		EAL_LOG(ERR,
			"%s(): couldn't allocate memory due to IOVA "
			"exceeding limits of current DMA mask.", __func__);
		return -1;
	}

	return 0;
}
713 
714 int
rte_eal_hugepage_init(void)715 rte_eal_hugepage_init(void)
716 {
717 	const struct internal_config *internal_conf =
718 		eal_get_internal_configuration();
719 
720 	return internal_conf->no_hugetlbfs ?
721 		eal_nohuge_init() : eal_dynmem_hugepage_init();
722 }
723 
/* Stub: secondary-process hugepage attach is not implemented on Windows. */
int
rte_eal_hugepage_attach(void)
{
	EAL_LOG_NOT_IMPLEMENTED();
	return -1;
}
730