xref: /dpdk/lib/eal/windows/eal_lcore.c (revision ae67895b507bb6af22263c79ba0d5c374b396485)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 * Copyright (C) 2022 Microsoft Corporation
 */

#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_lcore.h>

#include "eal_private.h"
#include "eal_thread.h"
#include "eal_windows.h"

/** Number of logical processors (cores) in a processor group (32 or 64). */
#define EAL_PROCESSOR_GROUP_SIZE (sizeof(KAFFINITY) * CHAR_BIT)

struct lcore_map {
	uint8_t socket_id;
	uint8_t core_id;
};

struct socket_map {
	uint16_t node_id;
};

struct cpu_map {
	unsigned int lcore_count;
	unsigned int socket_count;
	unsigned int cpu_count;
	struct lcore_map lcores[RTE_MAX_LCORE];
	struct socket_map sockets[RTE_MAX_NUMA_NODES];
	GROUP_AFFINITY cpus[CPU_SETSIZE];
};

static struct cpu_map cpu_map;

/* eal_create_cpu_map() is called before logging is initialized */
static void
__rte_format_printf(1, 2)
log_early(const char *format, ...)
{
	va_list va;

	va_start(va, format);
	vfprintf(stderr, format, va);
	va_end(va);
}

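/*
 * Record the group number and affinity mask of every active logical
 * processor in cpu_map.cpus[]: query the required buffer size, allocate
 * it, then fetch the processor group information.
 */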
static int
eal_query_group_affinity(void)
{
	SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *infos = NULL;
	unsigned int *cpu_count = &cpu_map.cpu_count;
	DWORD infos_size = 0;
	int ret = 0;
	USHORT group_count;
	KAFFINITY affinity;
	USHORT group_no;
	unsigned int i;

	if (!GetLogicalProcessorInformationEx(RelationGroup, NULL,
			&infos_size)) {
		DWORD error = GetLastError();
		if (error != ERROR_INSUFFICIENT_BUFFER) {
			EAL_LOG(ERR, "Cannot get group information size, error %lu",
				error);
			rte_errno = EINVAL;
			ret = -1;
			goto cleanup;
		}
	}

	infos = malloc(infos_size);
	if (infos == NULL) {
		EAL_LOG(ERR, "Cannot allocate memory for processor group information");
		rte_errno = ENOMEM;
		ret = -1;
		goto cleanup;
	}

	if (!GetLogicalProcessorInformationEx(RelationGroup, infos,
			&infos_size)) {
		EAL_LOG(ERR, "Cannot get group information, error %lu",
			GetLastError());
		rte_errno = EINVAL;
		ret = -1;
		goto cleanup;
	}

	*cpu_count = 0;
	group_count = infos->Group.ActiveGroupCount;
	for (group_no = 0; group_no < group_count; group_no++) {
		affinity = infos->Group.GroupInfo[group_no].ActiveProcessorMask;
		for (i = 0; i < EAL_PROCESSOR_GROUP_SIZE; i++) {
			if ((affinity & ((KAFFINITY)1 << i)) == 0)
				continue;
			cpu_map.cpus[*cpu_count].Group = group_no;
			cpu_map.cpus[*cpu_count].Mask = (KAFFINITY)1 << i;
			(*cpu_count)++;
		}
	}

cleanup:
	free(infos);
	return ret;
}

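/*
 * Add the cores of one RelationNumaNode entry to the lcore and socket maps.
 * Returns true when either map is full and enumeration must stop.
 */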
static bool
eal_create_lcore_map(const SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *info)
{
	const unsigned int node_id = info->NumaNode.NodeNumber;
	const GROUP_AFFINITY *cores = &info->NumaNode.GroupMask;
	struct lcore_map *lcore;
	unsigned int socket_id;
	unsigned int i;

	/*
	 * A NUMA node may be reported multiple times if it spans more than
	 * one processor group, e.g. 80 cores of one physical processor form
	 * a single NUMA node but two processor groups, because a group holds
	 * at most 32 or 64 logical processors.
	 */
	for (socket_id = 0; socket_id < cpu_map.socket_count; socket_id++)
		if (cpu_map.sockets[socket_id].node_id == node_id)
			break;

	if (socket_id == cpu_map.socket_count) {
		if (socket_id == RTE_DIM(cpu_map.sockets))
			return true;

		cpu_map.sockets[socket_id].node_id = node_id;
		cpu_map.socket_count++;
	}

	for (i = 0; i < EAL_PROCESSOR_GROUP_SIZE; i++) {
		if ((cores->Mask & ((KAFFINITY)1 << i)) == 0)
			continue;

		if (cpu_map.lcore_count == RTE_DIM(cpu_map.lcores))
			return true;

		lcore = &cpu_map.lcores[cpu_map.lcore_count];
		lcore->socket_id = socket_id;
		lcore->core_id = cores->Group * EAL_PROCESSOR_GROUP_SIZE + i;
		cpu_map.lcore_count++;
	}
	return false;
}

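/*
 * Build the global cpu_map: enumerate NUMA nodes to fill the lcore and
 * socket maps, then record per-CPU group affinities.
 */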
int
eal_create_cpu_map(void)
{
	SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *infos, *info;
	DWORD infos_size;
	bool full = false;
	int ret = 0;

	infos = NULL;
	infos_size = 0;
	if (!GetLogicalProcessorInformationEx(
			RelationNumaNode, NULL, &infos_size)) {
		DWORD error = GetLastError();
		if (error != ERROR_INSUFFICIENT_BUFFER) {
			log_early("Cannot get NUMA node info size, error %lu\n",
				error);
			rte_errno = EINVAL;
			ret = -1;
			goto exit;
		}
	}

	infos = malloc(infos_size);
	if (infos == NULL) {
		log_early("Cannot allocate memory for NUMA node information\n");
		rte_errno = ENOMEM;
		ret = -1;
		goto exit;
	}

	if (!GetLogicalProcessorInformationEx(
			RelationNumaNode, infos, &infos_size)) {
		log_early("Cannot get NUMA node information, error %lu\n",
			GetLastError());
		rte_errno = EINVAL;
		ret = -1;
		goto exit;
	}

	info = infos;
	while ((uint8_t *)info - (uint8_t *)infos < infos_size) {
		if (eal_create_lcore_map(info)) {
			full = true;
			break;
		}

		info = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)(
			(uint8_t *)info + info->Size);
	}

	if (eal_query_group_affinity()) {
		/*
		 * No need to set rte_errno here.
		 * It is set by eal_query_group_affinity().
		 */
		ret = -1;
		goto exit;
	}

exit:
	if (full) {
		/* Not a fatal error, but important for troubleshooting. */
		log_early("Enumerated maximum of %u NUMA nodes and %u cores\n",
			cpu_map.socket_count, cpu_map.lcore_count);
	}

	free(infos);

	return ret;
}

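/* Report whether an lcore index refers to a detected logical processor. */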
int
eal_cpu_detected(unsigned int lcore_id)
{
	return lcore_id < cpu_map.lcore_count;
}

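/* Get the EAL socket index of the given lcore. */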
unsigned
eal_cpu_socket_id(unsigned int lcore_id)
{
	return cpu_map.lcores[lcore_id].socket_id;
}

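/* Get the system-wide logical processor number backing the given lcore. */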
unsigned
eal_cpu_core_id(unsigned int lcore_id)
{
	return cpu_map.lcores[lcore_id].core_id;
}

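/* Translate an EAL socket index to the Windows NUMA node number. */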
unsigned int
eal_socket_numa_node(unsigned int socket_id)
{
	return cpu_map.sockets[socket_id].node_id;
}

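/*
 * Get the processor group and affinity mask of the logical processor
 * with the given index in cpu_map.
 */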
PGROUP_AFFINITY
eal_get_cpu_affinity(size_t cpu_index)
{
	RTE_VERIFY(cpu_index < CPU_SETSIZE);

	return &cpu_map.cpus[cpu_index];
}