xref: /dpdk/lib/eal/windows/eal.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdarg.h>

#include <fcntl.h>
#include <io.h>
#include <share.h>
#include <sys/stat.h>

#include <rte_debug.h>
#include <rte_bus.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <eal_memcfg.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <eal_thread.h>
#include <eal_internal_cfg.h>
#include <eal_filesystem.h>
#include <eal_options.h>
#include <eal_private.h>
#include <rte_service_component.h>
#include <rte_vfio.h>

#include "eal_firmware.h"
#include "eal_hugepages.h"
#include "eal_trace.h"
#include "eal_windows.h"
#include "log_internal.h"

#define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)

/* Define the fd variable here, because the file needs to be kept open for
 * the duration of the program, as we hold a write lock on it in the
 * primary process.
 */
static int mem_cfg_fd = -1;

/* internal configuration (per-core) */
struct lcore_config lcore_config[RTE_MAX_LCORE];

/* Detect if we are a primary or a secondary process */
enum rte_proc_type_t
eal_proc_type_detect(void)
{
	enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
	const char *pathname = eal_runtime_config_path();
	const struct rte_config *config = rte_eal_get_configuration();

	/* If we can open the file but cannot get a write lock, we are a
	 * secondary process. NOTE: if we get a file handle back, we keep it
	 * open and do not close it, to prevent a race condition between
	 * multiple opens.
	 */
	errno_t err = _sopen_s(&mem_cfg_fd, pathname,
		_O_RDWR, _SH_DENYNO, _S_IREAD | _S_IWRITE);
	if (err == 0) {
		OVERLAPPED soverlapped = { 0 };
		soverlapped.Offset = sizeof(*config->mem_config);
		soverlapped.OffsetHigh = 0;

		HANDLE hwinfilehandle = (HANDLE)_get_osfhandle(mem_cfg_fd);

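		/* Try to take the write lock without blocking; if it is
		 * already held, another (primary) process owns the shared
		 * config, so we are a secondary process.
		 */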
		if (!LockFileEx(hwinfilehandle,
			LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0,
			sizeof(*config->mem_config), 0, &soverlapped))
			ptype = RTE_PROC_SECONDARY;
	}

	EAL_LOG(INFO, "Auto-detected process type: %s",
		ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");

	return ptype;
}

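/* Multi-process mode is not supported on Windows, so disabling it
 * always succeeds.
 */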
bool
rte_mp_disable(void)
{
	return true;
}

/* display usage */
static void
eal_usage(const char *prgname)
{
	rte_usage_hook_t hook = eal_get_application_usage_hook();

	printf("\nUsage: %s ", prgname);
	eal_common_usage();
	/* Allow the application to print its usage message too
	 * if hook is set
	 */
	if (hook) {
		printf("===== Application Usage =====\n\n");
		(hook)(prgname);
	}
}

/* Parse the arguments for --log-level only */
static void
eal_log_level_parse(int argc, char **argv)
{
	int opt;
	char **argvopt;
	int option_index;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	argvopt = argv;

	eal_reset_internal_config(internal_conf);

	while ((opt = getopt_long(argc, argvopt, eal_short_options,
		eal_long_options, &option_index)) != EOF) {

		int ret;

		/* getopt is not happy, stop right now */
		if (opt == '?')
			break;

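		/* Only --log-level is parsed in this early pass; all other
		 * options are handled later by eal_parse_args().
		 */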
		ret = (opt == OPT_LOG_LEVEL_NUM) ?
			eal_parse_common_option(opt, optarg,
				internal_conf) : 0;

		/* common parser is not happy */
		if (ret < 0)
			break;
	}

	optind = 0; /* reset getopt lib */
}

/* Parse the arguments given on the command line of the application */
static int
eal_parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, eal_short_options,
		eal_long_options, &option_index)) != EOF) {

		int ret;

		/* getopt is not happy, stop right now */
		if (opt == '?') {
			eal_usage(prgname);
			return -1;
		}

		/* eal_log_level_parse() already handled this option */
		if (opt == OPT_LOG_LEVEL_NUM)
			continue;

		ret = eal_parse_common_option(opt, optarg, internal_conf);
		/* common parser is not happy */
		if (ret < 0) {
			eal_usage(prgname);
			return -1;
		}
		/* common parser handled this option */
		if (ret == 0)
			continue;

		switch (opt) {
		case OPT_HELP_NUM:
			eal_usage(prgname);
			exit(EXIT_SUCCESS);
		default:
			if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
				EAL_LOG(ERR, "Option %c is not supported "
					"on Windows", opt);
			} else if (opt >= OPT_LONG_MIN_NUM &&
				opt < OPT_LONG_MAX_NUM) {
				EAL_LOG(ERR, "Option %s is not supported "
					"on Windows",
					eal_long_options[option_index].name);
			} else {
				EAL_LOG(ERR, "Option %d is not supported "
					"on Windows", opt);
			}
			eal_usage(prgname);
			return -1;
		}
	}

	if (eal_adjust_config(internal_conf) != 0)
		return -1;

	/* sanity checks */
	if (eal_check_common_options(internal_conf) != 0) {
		eal_usage(prgname);
		return -1;
	}

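	/* Restore the program name in argv[] and return the number of
	 * arguments parsed, so the application can skip past the EAL options.
	 */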
	if (optind >= 0)
		argv[optind - 1] = prgname;
	ret = optind - 1;
	optind = 0; /* reset getopt lib */
	return ret;
}

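/* Dummy function launched on every worker lcore at the end of rte_eal_init()
 * so that the main lcore knows when all workers are ready.
 */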
static int
sync_func(void *arg __rte_unused)
{
	return 0;
}

static void
rte_eal_init_alert(const char *msg)
{
	fprintf(stderr, "EAL: FATAL: %s\n", msg);
	EAL_LOG(ERR, "%s", msg);
}

/* Stubs to enable EAL trace point compilation
 * until eal_common_trace.c can be compiled.
 */

RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
RTE_DEFINE_PER_LCORE(void *, trace_mem);

void
__rte_trace_mem_per_thread_alloc(void)
{
}

void
trace_mem_per_thread_free(void)
{
}

void
__rte_trace_point_emit_field(size_t sz, const char *field,
	const char *type)
{
	RTE_SET_USED(sz);
	RTE_SET_USED(field);
	RTE_SET_USED(type);
}

int
__rte_trace_point_register(rte_trace_point_t *trace, const char *name,
	void (*register_fn)(void))
{
	RTE_SET_USED(trace);
	RTE_SET_USED(name);
	RTE_SET_USED(register_fn);
	return -ENOTSUP;
}

int
rte_eal_cleanup(void)
{
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	eal_intr_thread_cancel();
	eal_mem_virt2iova_cleanup();
	eal_bus_cleanup();
	/* after this point, any DPDK pointers will become dangling */
	rte_eal_memory_detach();
	eal_cleanup_config(internal_conf);
	return 0;
}

/* Launch threads, called at application init(). */
int
rte_eal_init(int argc, char **argv)
{
	int i, fctret, bscan;
	const struct rte_config *config = rte_eal_get_configuration();
	struct internal_config *internal_conf =
		eal_get_internal_configuration();
	bool has_phys_addr;
	enum rte_iova_mode iova_mode;
	int ret;
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
	char thread_name[RTE_THREAD_NAME_SIZE];

	eal_log_init(NULL, 0);

	eal_log_level_parse(argc, argv);

	if (eal_create_cpu_map() < 0) {
		rte_eal_init_alert("Cannot discover CPU and NUMA.");
		/* rte_errno is set */
		return -1;
	}

	if (rte_eal_cpu_init() < 0) {
		rte_eal_init_alert("Cannot detect lcores.");
		rte_errno = ENOTSUP;
		return -1;
	}

	fctret = eal_parse_args(argc, argv);
	if (fctret < 0)
		exit(1);

	if (eal_option_device_parse()) {
		rte_errno = ENODEV;
		return -1;
	}

	/* Prevent creation of shared memory files. */
	if (internal_conf->in_memory == 0) {
		EAL_LOG(WARNING, "Multi-process support is requested, "
			"but not available.");
		internal_conf->in_memory = 1;
		internal_conf->no_shconf = 1;
	}

	if (!internal_conf->no_hugetlbfs && (eal_hugepage_info_init() < 0)) {
		rte_eal_init_alert("Cannot get hugepage information");
		rte_errno = EACCES;
		return -1;
	}

	if (internal_conf->memory == 0 && !internal_conf->force_sockets) {
		if (internal_conf->no_hugetlbfs)
			internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
	}

	if (rte_eal_intr_init() < 0) {
		rte_eal_init_alert("Cannot init interrupt-handling thread");
		return -1;
	}

	if (rte_eal_timer_init() < 0) {
		rte_eal_init_alert("Cannot init TSC timer");
		rte_errno = EFAULT;
		return -1;
	}

	bscan = rte_bus_scan();
	if (bscan < 0) {
		rte_eal_init_alert("Cannot scan the buses");
		rte_errno = ENODEV;
		return -1;
	}

	if (eal_mem_win32api_init() < 0) {
		rte_eal_init_alert("Cannot access Win32 memory management");
		rte_errno = ENOTSUP;
		return -1;
	}

	has_phys_addr = true;
	if (eal_mem_virt2iova_init() < 0) {
		/* Non-fatal error if physical addresses are not required. */
		EAL_LOG(DEBUG, "Cannot access virt2phys driver, "
			"PA will not be available");
		has_phys_addr = false;
	}

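	/* Select the IOVA mode: an explicit --iova-mode request takes
	 * precedence; otherwise consult the buses when physical addresses
	 * are available, falling back to PA (unless IOVA-as-PA is disabled
	 * at build time) or to VA when they are not.
	 */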
	iova_mode = internal_conf->iova_mode;
	if (iova_mode == RTE_IOVA_DC) {
		EAL_LOG(DEBUG, "Specific IOVA mode is not requested, autodetecting");
		if (has_phys_addr) {
			EAL_LOG(DEBUG, "Selecting IOVA mode according to bus requests");
			iova_mode = rte_bus_get_iommu_class();
			if (iova_mode == RTE_IOVA_DC) {
				if (!RTE_IOVA_IN_MBUF) {
					iova_mode = RTE_IOVA_VA;
					EAL_LOG(DEBUG, "IOVA as VA mode is forced by build option.");
				} else {
					iova_mode = RTE_IOVA_PA;
				}
			}
		} else {
			iova_mode = RTE_IOVA_VA;
		}
	}

	if (iova_mode == RTE_IOVA_PA && !has_phys_addr) {
		rte_eal_init_alert("Cannot use IOVA as 'PA' since physical addresses are not available");
		rte_errno = EINVAL;
		return -1;
	}

	if (iova_mode == RTE_IOVA_PA && !RTE_IOVA_IN_MBUF) {
		rte_eal_init_alert("Cannot use IOVA as 'PA' as it is disabled during build");
		rte_errno = EINVAL;
		return -1;
	}

	EAL_LOG(DEBUG, "Selected IOVA mode '%s'",
		iova_mode == RTE_IOVA_PA ? "PA" : "VA");
	rte_eal_get_configuration()->iova_mode = iova_mode;

	if (rte_eal_memzone_init() < 0) {
		rte_eal_init_alert("Cannot init memzone");
		rte_errno = ENODEV;
		return -1;
	}

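	/* Memory and malloc-heap initialization run while holding the shared
	 * memory configuration read lock.
	 */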
	rte_mcfg_mem_read_lock();

	if (rte_eal_memory_init() < 0) {
		rte_mcfg_mem_read_unlock();
		rte_eal_init_alert("Cannot init memory");
		rte_errno = ENOMEM;
		return -1;
	}

	if (rte_eal_malloc_heap_init() < 0) {
		rte_mcfg_mem_read_unlock();
		rte_eal_init_alert("Cannot init malloc heap");
		rte_errno = ENODEV;
		return -1;
	}

	rte_mcfg_mem_read_unlock();

	if (rte_eal_malloc_heap_populate() < 0) {
		rte_eal_init_alert("Cannot init malloc heap");
		rte_errno = ENODEV;
		return -1;
	}

	if (rte_eal_tailqs_init() < 0) {
		rte_eal_init_alert("Cannot init tail queues for objects");
		rte_errno = EFAULT;
		return -1;
	}

	if (rte_thread_set_affinity_by_id(rte_thread_self(),
			&lcore_config[config->main_lcore].cpuset) != 0) {
		rte_eal_init_alert("Cannot set affinity");
		rte_errno = EINVAL;
		return -1;
	}
	__rte_thread_init(config->main_lcore,
		&lcore_config[config->main_lcore].cpuset);

	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
	EAL_LOG(DEBUG, "Main lcore %u is ready (tid=%zx;cpuset=[%s%s])",
		config->main_lcore, rte_thread_self().opaque_id, cpuset,
		ret == 0 ? "" : "...");

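	/* Create one worker thread per remaining lcore, give each a pair of
	 * pipes to the main thread, and pin it to its cpuset.
	 */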
	RTE_LCORE_FOREACH_WORKER(i) {

		/*
		 * create communication pipes between main thread
		 * and children
		 */
		if (_pipe(lcore_config[i].pipe_main2worker,
			sizeof(char), _O_BINARY) < 0)
			rte_panic("Cannot create pipe\n");
		if (_pipe(lcore_config[i].pipe_worker2main,
			sizeof(char), _O_BINARY) < 0)
			rte_panic("Cannot create pipe\n");

		lcore_config[i].state = WAIT;

		/* create a thread for each lcore */
		if (rte_thread_create(&lcore_config[i].thread_id, NULL,
				eal_thread_loop, (void *)(uintptr_t)i) != 0)
			rte_panic("Cannot create thread\n");

		/* Set thread name to aid debugging. */
		snprintf(thread_name, sizeof(thread_name),
			"dpdk-worker%d", i);
		rte_thread_set_name(lcore_config[i].thread_id, thread_name);

		ret = rte_thread_set_affinity_by_id(lcore_config[i].thread_id,
			&lcore_config[i].cpuset);
		if (ret != 0)
			EAL_LOG(DEBUG, "Cannot set affinity");
	}

	/* Initialize services so drivers can register services during probe. */
	ret = rte_service_init();
	if (ret) {
		rte_eal_init_alert("rte_service_init() failed");
		rte_errno = -ret;
		return -1;
	}

	if (rte_bus_probe()) {
		rte_eal_init_alert("Cannot probe devices");
		rte_errno = ENOTSUP;
		return -1;
	}

	/*
	 * Launch a dummy function on all worker lcores, so that main lcore
	 * knows they are all ready when this function returns.
	 */
	rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MAIN);
	rte_eal_mp_wait_lcore();

	eal_mcfg_complete();

	return fctret;
}

/* Don't use MinGW asprintf(), so that the code is identical across all toolchains. */
int
eal_asprintf(char **buffer, const char *format, ...)
{
	int size, ret;
	va_list arg;

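	/* Call vsnprintf() twice: first with a NULL buffer to compute the
	 * required size, then to format the string into the allocated buffer.
	 */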
	va_start(arg, format);
	size = vsnprintf(NULL, 0, format, arg);
	va_end(arg);
	if (size < 0)
		return -1;
	size++;

	*buffer = malloc(size);
	if (*buffer == NULL)
		return -1;

	va_start(arg, format);
	ret = vsnprintf(*buffer, size, format, arg);
	va_end(arg);
	if (ret != size - 1) {
		free(*buffer);
		return -1;
	}
	return ret;
}

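/* VFIO is not available on Windows; the container DMA map/unmap API is
 * stubbed out and always fails with ENOTSUP.
 */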
int
rte_vfio_container_dma_map(__rte_unused int container_fd,
			__rte_unused uint64_t vaddr,
			__rte_unused uint64_t iova,
			__rte_unused uint64_t len)
{
	rte_errno = ENOTSUP;
	return -1;
}

int
rte_vfio_container_dma_unmap(__rte_unused int container_fd,
			__rte_unused uint64_t vaddr,
			__rte_unused uint64_t iova,
			__rte_unused uint64_t len)
{
	rte_errno = ENOTSUP;
	return -1;
}

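/* Firmware loading is not implemented on Windows; always report failure. */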
int
rte_firmware_read(__rte_unused const char *name,
			__rte_unused void **buf,
			__rte_unused size_t *bufsz)
{
	return -1;
}