/*	$NetBSD: subr_cpu.c,v 1.13 2020/02/15 07:20:40 skrll Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019, 2020
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CPU related routines shared with rump.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpu.c,v 1.13 2020/02/15 07:20:40 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

static void	cpu_topology_fake1(struct cpu_info *);

kmutex_t	cpu_lock		__cacheline_aligned;
int		ncpu			__read_mostly;
int		ncpuonline		__read_mostly;
bool		mp_online		__read_mostly;
static bool	cpu_topology_present	__read_mostly;
static bool	cpu_topology_haveslow	__read_mostly;
int64_t		cpu_counts[CPU_COUNT_MAX];

/* An array of CPUs.  There are ncpu entries. */
struct cpu_info **cpu_infos		__read_mostly;

/* Note: set on mi_cpu_attach() and idle_loop(). */
kcpuset_t *	kcpuset_attached	__read_mostly	= NULL;
kcpuset_t *	kcpuset_running		__read_mostly	= NULL;

static char cpu_model[128];

/*
 * mi_cpu_init: early initialisation of MI CPU related structures.
 *
 * Note: may not block and memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{
	struct cpu_info *ci;

	mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

	kcpuset_create(&kcpuset_attached, true);
	kcpuset_create(&kcpuset_running, true);
	kcpuset_set(kcpuset_running, 0);

	ci = curcpu();
	cpu_topology_fake1(ci);
}

int
cpu_setmodel(const char *fmt, ...)
{
	int len;
	va_list ap;

	va_start(ap, fmt);
	len = vsnprintf(cpu_model, sizeof(cpu_model), fmt, ap);
	va_end(ap);
	return len;
}

const char *
cpu_getmodel(void)
{
	return cpu_model;
}
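
/*
 * Illustrative sketch (assumed caller, not code from this file): MD
 * attach code typically records the model string once and consumers
 * read it back later, e.g.
 *
 *	cpu_setmodel("%s %s", vendor_str, name_str);
 *	printf("cpu0: %s\n", cpu_getmodel());
 *
 * where vendor_str and name_str are hypothetical MD-derived strings.
 */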

/*
 * cpu_softintr_p: return true if the current LWP is a soft interrupt
 * LWP.
 */
bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}

/*
 * Collect CPU topology information as each CPU is attached.  This can be
 * called early during boot, so we need to be careful what we do.
 */
void
cpu_topology_set(struct cpu_info *ci, u_int package_id, u_int core_id,
    u_int smt_id, u_int numa_id)
{
	enum cpu_rel rel;

	cpu_topology_present = true;
	ci->ci_package_id = package_id;
	ci->ci_core_id = core_id;
	ci->ci_smt_id = smt_id;
	ci->ci_numa_id = numa_id;
	ci->ci_is_slow = false;
	for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
		ci->ci_sibling[rel] = ci;
		ci->ci_nsibling[rel] = 1;
	}
}
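
/*
 * Illustrative sketch (assumed caller, not code from this file): MD
 * code decodes the IDs from CPUID leaves, the device tree or similar
 * firmware sources and reports them at attach time, e.g.
 *
 *	cpu_topology_set(ci, pkg_id, core_id, smt_id, numa_id);
 *
 * where the four ID variables are hypothetical MD-derived values.
 */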

/*
 * Collect the CPU's relative speed: record whether it is a "slow" CPU
 * in an asymmetric system.
 */
void
cpu_topology_setspeed(struct cpu_info *ci, bool slow)
{

	cpu_topology_haveslow |= slow;
	ci->ci_is_slow = slow;
}

/*
 * Link a CPU into the given circular list.
 */
static void
cpu_topology_link(struct cpu_info *ci, struct cpu_info *ci2, enum cpu_rel rel)
{
	struct cpu_info *ci3;

	/* Walk to the end of the existing circular list and append. */
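	/*
	 * Each member passed on the walk has its count bumped, and the
	 * new CPU inherits the final (already bumped) count below, so
	 * every member of the list ends up with the same sibling total.
	 */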
	for (ci3 = ci2;; ci3 = ci3->ci_sibling[rel]) {
		ci3->ci_nsibling[rel]++;
		if (ci3->ci_sibling[rel] == ci2) {
			break;
		}
	}
	ci->ci_sibling[rel] = ci2;
	ci3->ci_sibling[rel] = ci;
	ci->ci_nsibling[rel] = ci3->ci_nsibling[rel];
}

/*
 * Print out the topology lists.
 */
static void
cpu_topology_dump(void)
{
#ifdef DEBUG
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci, *ci2;
	const char *names[] = { "core", "pkg", "1st" };
	enum cpu_rel rel;
	int i;

	CTASSERT(__arraycount(names) >= __arraycount(ci->ci_sibling));

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (cpu_topology_haveslow)
			printf("%s ", ci->ci_is_slow ? "slow" : "fast");
		for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
			printf("%s has %d %s siblings:", cpu_name(ci),
			    ci->ci_nsibling[rel], names[rel]);
			ci2 = ci->ci_sibling[rel];
			i = 0;
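			/* Cap the walk in case a corrupt list never cycles back. */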
			do {
				printf(" %s", cpu_name(ci2));
				ci2 = ci2->ci_sibling[rel];
			} while (++i < 64 && ci2 != ci->ci_sibling[rel]);
			if (i == 64) {
				printf(" GAVE UP");
			}
			printf("\n");
		}
		printf("%s first in package: %s\n", cpu_name(ci),
		    cpu_name(ci->ci_package1st));
	}
#endif	/* DEBUG */
}

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 * Used early in boot, and by cpu_topology_fake().
 */
static void
cpu_topology_fake1(struct cpu_info *ci)
{
	enum cpu_rel rel;

	for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
		ci->ci_sibling[rel] = ci;
		ci->ci_nsibling[rel] = 1;
	}
	if (!cpu_topology_present) {
		ci->ci_package_id = cpu_index(ci);
	}
	ci->ci_schedstate.spc_flags |=
	    (SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
	ci->ci_package1st = ci;
	ci->ci_is_slow = false;
	cpu_topology_haveslow = false;
}

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 * Don't override ci_package_id, etc, if cpu_topology_present is set.
 * MD code also uses these.
 */
static void
cpu_topology_fake(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpu_topology_fake1(ci);
		/* Undo (early boot) flag set so everything links OK. */
		ci->ci_schedstate.spc_flags &=
		    ~(SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
	}
}

/*
 * Fix up basic CPU topology info.  Right now that means attach each CPU to
 * circular lists of its siblings in the same core, and in the same package.
 */
void
cpu_topology_init(void)
{
	CPU_INFO_ITERATOR cii, cii2;
	struct cpu_info *ci, *ci2, *ci3;
	u_int minsmt, mincore;

	if (!cpu_topology_present) {
		cpu_topology_fake();
		goto linkit;
	}

	/* Find siblings in same core and package. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci->ci_schedstate.spc_flags &=
		    ~(SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			/* Identical IDs on two CPUs mean the data is bogus. */
			if (ci2->ci_package_id == ci->ci_package_id &&
			    ci2->ci_core_id == ci->ci_core_id &&
			    ci2->ci_smt_id == ci->ci_smt_id &&
			    ci2 != ci) {
#ifdef DEBUG
				printf("cpu%u %p pkg %u core %u smt %u same as "
				       "cpu%u %p pkg %u core %u smt %u\n",
				       cpu_index(ci), ci, ci->ci_package_id,
				       ci->ci_core_id, ci->ci_smt_id,
				       cpu_index(ci2), ci2, ci2->ci_package_id,
				       ci2->ci_core_id, ci2->ci_smt_id);
#endif
				printf("cpu_topology_init: info bogus, "
				    "faking it\n");
				cpu_topology_fake();
				goto linkit;
			}
			if (ci2 == ci ||
			    ci2->ci_package_id != ci->ci_package_id) {
				continue;
			}
			/* Find CPUs in the same core. */
			if (ci->ci_nsibling[CPUREL_CORE] == 1 &&
			    ci->ci_core_id == ci2->ci_core_id) {
				cpu_topology_link(ci, ci2, CPUREL_CORE);
			}
			/* Find CPUs in the same package. */
			if (ci->ci_nsibling[CPUREL_PACKAGE] == 1) {
				cpu_topology_link(ci, ci2, CPUREL_PACKAGE);
			}
			if (ci->ci_nsibling[CPUREL_CORE] > 1 &&
			    ci->ci_nsibling[CPUREL_PACKAGE] > 1) {
				break;
			}
		}
	}

 linkit:
	/* Identify lowest numbered SMT in each core. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci2 = ci3 = ci;
		minsmt = ci->ci_smt_id;
		do {
			if (ci2->ci_smt_id < minsmt) {
				ci3 = ci2;
				minsmt = ci2->ci_smt_id;
			}
			ci2 = ci2->ci_sibling[CPUREL_CORE];
		} while (ci2 != ci);
		ci3->ci_schedstate.spc_flags |= SPCF_CORE1ST;
	}

	/* Identify lowest numbered core in each package. */
	ci3 = NULL;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_CORE1ST) == 0) {
			continue;
		}
		ci2 = ci3 = ci;
		mincore = ci->ci_core_id;
		do {
			if ((ci2->ci_schedstate.spc_flags &
			    SPCF_CORE1ST) != 0 &&
			    ci2->ci_core_id < mincore) {
				ci3 = ci2;
				mincore = ci2->ci_core_id;
			}
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci);

		if ((ci3->ci_schedstate.spc_flags & SPCF_PACKAGE1ST) != 0) {
			/* Already identified - nothing more to do. */
			continue;
		}
		ci3->ci_schedstate.spc_flags |= SPCF_PACKAGE1ST;

		/* Walk through all CPUs in package and point to first. */
		ci2 = ci3;
		do {
			ci2->ci_package1st = ci3;
			ci2->ci_sibling[CPUREL_PACKAGE1ST] = ci3;
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci3);

		/* Now look for somebody else to link to. */
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_PACKAGE1ST)
			    != 0 && ci2 != ci3) {
				cpu_topology_link(ci3, ci2, CPUREL_PACKAGE1ST);
				break;
			}
		}
	}

	/* Walk through all packages, starting with value of ci3 from above. */
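	/* ci3 can't be NULL: every core has a CORE1ST CPU, so the loop ran. */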
	KASSERT(ci3 != NULL);
	ci = ci3;
	do {
		/* Walk through CPUs in the package and copy in PACKAGE1ST. */
		ci2 = ci;
		do {
			ci2->ci_sibling[CPUREL_PACKAGE1ST] =
			    ci->ci_sibling[CPUREL_PACKAGE1ST];
			ci2->ci_nsibling[CPUREL_PACKAGE1ST] =
			    ci->ci_nsibling[CPUREL_PACKAGE1ST];
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci);
		ci = ci->ci_sibling[CPUREL_PACKAGE1ST];
	} while (ci != ci3);

	if (cpu_topology_haveslow) {
		/*
		 * For asymmetric systems where some CPUs are slower than
		 * others, mark first class CPUs for the scheduler.  This
		 * conflicts with SMT right now so whinge if observed.
		 */
		if (curcpu()->ci_nsibling[CPUREL_CORE] > 1) {
			printf("cpu_topology_init: asymmetric & SMT??\n");
		}
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (!ci->ci_is_slow) {
				ci->ci_schedstate.spc_flags |= SPCF_1STCLASS;
			}
		}
	} else {
		/*
		 * For any other configuration mark the 1st CPU in each
		 * core as a first class CPU.
		 */
		for (CPU_INFO_FOREACH(cii, ci)) {
			if ((ci->ci_schedstate.spc_flags & SPCF_CORE1ST) != 0) {
				ci->ci_schedstate.spc_flags |= SPCF_1STCLASS;
			}
		}
	}

	cpu_topology_dump();
}

/*
 * Adjust one count, for a counter that's NOT updated from interrupt
 * context.  Hardly worth making an inline because of the preemption
 * handling needed.
 */
void
cpu_count(enum cpu_count idx, int64_t delta)
{
	lwp_t *l = curlwp;
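	/* Keep the LWP on this CPU so l->l_cpu stays valid for the update. */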
	KPREEMPT_DISABLE(l);
	l->l_cpu->ci_counts[idx] += delta;
	KPREEMPT_ENABLE(l);
}
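
/*
 * Illustrative sketch (assumed caller, not code from this file):
 * noting one fork in the per-CPU counters would look like
 *
 *	cpu_count(CPU_COUNT_FORKS, 1);
 */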

/*
 * Fetch a fresh sum total for all counts.  Expensive - don't call often.
 */
void
cpu_count_sync_all(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int64_t sum[CPU_COUNT_MAX], *ptr;
	enum cpu_count i;
	int s;

	KASSERT(sizeof(ci->ci_counts) == sizeof(cpu_counts));

	if (__predict_true(mp_online)) {
		memset(sum, 0, sizeof(sum));
		/*
		 * We want this pass to be reasonably quick, so that the
		 * values we gather aren't badly skewed: don't let the
		 * current LWP be preempted meanwhile.
		 */
		s = splvm();
		curcpu()->ci_counts[CPU_COUNT_SYNC_ALL]++;
		for (CPU_INFO_FOREACH(cii, ci)) {
			ptr = ci->ci_counts;
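			/* Sum unrolled by 8: CPU_COUNT_MAX must be a multiple of 8. */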
			for (i = 0; i < CPU_COUNT_MAX; i += 8) {
				sum[i+0] += ptr[i+0];
				sum[i+1] += ptr[i+1];
				sum[i+2] += ptr[i+2];
				sum[i+3] += ptr[i+3];
				sum[i+4] += ptr[i+4];
				sum[i+5] += ptr[i+5];
				sum[i+6] += ptr[i+6];
				sum[i+7] += ptr[i+7];
			}
			KASSERT(i == CPU_COUNT_MAX);
		}
		memcpy(cpu_counts, sum, sizeof(cpu_counts));
		splx(s);
	} else {
		memcpy(cpu_counts, curcpu()->ci_counts, sizeof(cpu_counts));
	}
}

/*
 * Fetch a fresh sum total for one single count.  Expensive - don't call often.
 */
int64_t
cpu_count_sync(enum cpu_count count)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int64_t sum;
	int s;

	if (__predict_true(mp_online)) {
		s = splvm();
		curcpu()->ci_counts[CPU_COUNT_SYNC_ONE]++;
		sum = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			sum += ci->ci_counts[count];
		}
		splx(s);
	} else {
		/* XXX Early boot, iterator might not be available. */
		sum = curcpu()->ci_counts[count];
	}
	return cpu_counts[count] = sum;
}
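
/*
 * Illustrative sketch (assumed caller, not code from this file): a
 * stats consumer refreshes and reads one counter like so:
 *
 *	int64_t nforks = cpu_count_sync(CPU_COUNT_FORKS);
 */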