xref: /dpdk/lib/eal/windows/eal_lcore.c (revision b70a9b78)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 * Copyright (C) 2022 Microsoft Corporation
 */

#include <errno.h>
#include <limits.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>

#include "eal_private.h"
#include "eal_thread.h"
#include "eal_windows.h"

/** Number of logical processors (cores) in a processor group (32 or 64). */
#define EAL_PROCESSOR_GROUP_SIZE (sizeof(KAFFINITY) * CHAR_BIT)

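/*
 * One entry per EAL lcore: socket_id indexes cpu_map.sockets below,
 * core_id encodes the processor group and the bit position within it
 * (group * EAL_PROCESSOR_GROUP_SIZE + bit).
 */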
struct lcore_map {
	uint8_t socket_id;
	uint8_t core_id;
};

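/* One entry per EAL socket: the Windows NUMA node it stands for. */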
struct socket_map {
	uint16_t node_id;
};

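/*
 * Snapshot of the host topology, filled by eal_create_cpu_map():
 * lcores[] and sockets[] back the EAL lcore/socket accessors below,
 * cpus[] keeps one GROUP_AFFINITY per logical processor as returned
 * by eal_get_cpu_affinity().
 */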
struct cpu_map {
	unsigned int lcore_count;
	unsigned int socket_count;
	unsigned int cpu_count;
	struct lcore_map lcores[RTE_MAX_LCORE];
	struct socket_map sockets[RTE_MAX_NUMA_NODES];
	GROUP_AFFINITY cpus[CPU_SETSIZE];
};

static struct cpu_map cpu_map;

/* eal_create_cpu_map() is called before logging is initialized */
static void
__rte_format_printf(1, 2)
log_early(const char *format, ...)
{
	va_list va;

	va_start(va, format);
	vfprintf(stderr, format, va);
	va_end(va);
}

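/*
 * Fill cpu_map.cpus[] with one GROUP_AFFINITY per active logical processor
 * in every active processor group, and set cpu_map.cpu_count accordingly.
 * Returns 0 on success, -1 on failure with rte_errno set.
 */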
static int
eal_query_group_affinity(void)
{
	SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *infos = NULL;
	unsigned int *cpu_count = &cpu_map.cpu_count;
	DWORD infos_size = 0;
	int ret = 0;
	USHORT group_count;
	KAFFINITY affinity;
	USHORT group_no;
	unsigned int i;

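	/*
	 * The first call passes a NULL buffer and only queries the required
	 * buffer size, so ERROR_INSUFFICIENT_BUFFER is the expected outcome.
	 */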
	if (!GetLogicalProcessorInformationEx(RelationGroup, NULL,
			&infos_size)) {
		DWORD error = GetLastError();
		if (error != ERROR_INSUFFICIENT_BUFFER) {
			RTE_LOG(ERR, EAL, "Cannot get group information size, error %lu\n", error);
			rte_errno = EINVAL;
			ret = -1;
			goto cleanup;
		}
	}

	infos = malloc(infos_size);
	if (infos == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory for processor group information\n");
		rte_errno = ENOMEM;
		ret = -1;
		goto cleanup;
	}

	if (!GetLogicalProcessorInformationEx(RelationGroup, infos,
			&infos_size)) {
		RTE_LOG(ERR, EAL, "Cannot get group information, error %lu\n",
			GetLastError());
		rte_errno = EINVAL;
		ret = -1;
		goto cleanup;
	}

	*cpu_count = 0;
	group_count = infos->Group.ActiveGroupCount;
	for (group_no = 0; group_no < group_count; group_no++) {
		affinity = infos->Group.GroupInfo[group_no].ActiveProcessorMask;
		for (i = 0; i < EAL_PROCESSOR_GROUP_SIZE; i++) {
			if ((affinity & ((KAFFINITY)1 << i)) == 0)
				continue;
			cpu_map.cpus[*cpu_count].Group = group_no;
			cpu_map.cpus[*cpu_count].Mask = (KAFFINITY)1 << i;
			(*cpu_count)++;
		}
	}

cleanup:
	free(infos);
	return ret;
}

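/*
 * Add the lcores described by one RelationNumaNode record to cpu_map.
 * Returns true if the socket or lcore tables are already full and
 * enumeration must stop, false otherwise.
 */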
static bool
eal_create_lcore_map(const SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *info)
{
	const unsigned int node_id = info->NumaNode.NodeNumber;
	const GROUP_AFFINITY *cores = &info->NumaNode.GroupMask;
	struct lcore_map *lcore;
	unsigned int socket_id;
	unsigned int i;

	/*
	 * A NUMA node may be reported multiple times if it contains cores
	 * from different processor groups: e.g. 80 cores of one physical
	 * processor form a single NUMA node but two processor groups,
	 * because a group holds at most 64 (or 32) logical processors.
	 */
	for (socket_id = 0; socket_id < cpu_map.socket_count; socket_id++)
		if (cpu_map.sockets[socket_id].node_id == node_id)
			break;

	if (socket_id == cpu_map.socket_count) {
		if (socket_id == RTE_DIM(cpu_map.sockets))
			return true;

		cpu_map.sockets[socket_id].node_id = node_id;
		cpu_map.socket_count++;
	}

	for (i = 0; i < EAL_PROCESSOR_GROUP_SIZE; i++) {
		if ((cores->Mask & ((KAFFINITY)1 << i)) == 0)
			continue;

		if (cpu_map.lcore_count == RTE_DIM(cpu_map.lcores))
			return true;

		lcore = &cpu_map.lcores[cpu_map.lcore_count];
		lcore->socket_id = socket_id;
		lcore->core_id = cores->Group * EAL_PROCESSOR_GROUP_SIZE + i;
		cpu_map.lcore_count++;
	}
	return false;
}

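/*
 * Build cpu_map from the NUMA node and processor group information
 * reported by GetLogicalProcessorInformationEx().
 * Returns 0 on success, -1 on failure with rte_errno set.
 */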
int
eal_create_cpu_map(void)
{
	SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *infos, *info;
	DWORD infos_size;
	bool full = false;
	int ret = 0;

	infos = NULL;
	infos_size = 0;
	if (!GetLogicalProcessorInformationEx(
			RelationNumaNode, NULL, &infos_size)) {
		DWORD error = GetLastError();
		if (error != ERROR_INSUFFICIENT_BUFFER) {
			log_early("Cannot get NUMA node info size, error %lu\n",
				error);
			rte_errno = ENOMEM;
			ret = -1;
			goto exit;
		}
	}

	infos = malloc(infos_size);
	if (infos == NULL) {
		log_early("Cannot allocate memory for NUMA node information\n");
		rte_errno = ENOMEM;
		ret = -1;
		goto exit;
	}

	if (!GetLogicalProcessorInformationEx(
			RelationNumaNode, infos, &infos_size)) {
		log_early("Cannot get NUMA node information, error %lu\n",
			GetLastError());
		rte_errno = EINVAL;
		ret = -1;
		goto exit;
	}

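	/*
	 * SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX records are variable-sized,
	 * so the buffer is walked by advancing with each record's Size field.
	 */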
	info = infos;
	while ((uint8_t *)info - (uint8_t *)infos < infos_size) {
		if (eal_create_lcore_map(info)) {
			full = true;
			break;
		}

		info = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)(
			(uint8_t *)info + info->Size);
	}

	if (eal_query_group_affinity()) {
		/*
		 * No need to set rte_errno here.
		 * It is set by eal_query_group_affinity().
		 */
		ret = -1;
		goto exit;
	}

exit:
	if (full) {
		/* Not a fatal error, but important for troubleshooting. */
		log_early("Enumerated maximum of %u NUMA nodes and %u cores\n",
			cpu_map.socket_count, cpu_map.lcore_count);
	}

	free(infos);

	return ret;
}

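/*
 * The accessors below expose the cached topology to the rest of the EAL;
 * lcore_id and socket_id are EAL indices assigned during enumeration,
 * not Windows processor group or NUMA node numbers.
 */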
int
eal_cpu_detected(unsigned int lcore_id)
{
	return lcore_id < cpu_map.lcore_count;
}

unsigned
eal_cpu_socket_id(unsigned int lcore_id)
{
	return cpu_map.lcores[lcore_id].socket_id;
}

unsigned
eal_cpu_core_id(unsigned int lcore_id)
{
	return cpu_map.lcores[lcore_id].core_id;
}

unsigned int
eal_socket_numa_node(unsigned int socket_id)
{
	return cpu_map.sockets[socket_id].node_id;
}

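/*
 * Return the GROUP_AFFINITY of logical processor cpu_index, e.g. for use
 * with SetThreadGroupAffinity(); cpu_index must be below CPU_SETSIZE.
 */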
PGROUP_AFFINITY
eal_get_cpu_affinity(size_t cpu_index)
{
	RTE_VERIFY(cpu_index < CPU_SETSIZE);

	return &cpu_map.cpus[cpu_index];
}
254