1 /* $NetBSD: acpi_cpu.c,v 1.53 2020/12/07 10:57:41 jmcneill Exp $ */
2 
3 /*-
4  * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.53 2020/12/07 10:57:41 jmcneill Exp $");
31 
32 #include <sys/param.h>
33 #include <sys/cpu.h>
34 #include <sys/evcnt.h>
35 #include <sys/kernel.h>
36 #include <sys/kmem.h>
37 #include <sys/module.h>
38 #include <sys/mutex.h>
39 #include <sys/sysctl.h>
40 #include <sys/cpufreq.h>
41 
42 #include <dev/acpi/acpireg.h>
43 #include <dev/acpi/acpivar.h>
44 #include <dev/acpi/acpi_cpu.h>
45 
46 #include <machine/acpi_machdep.h>
47 
48 #if defined(__i386__) || defined(__x86_64__)
49 #include <machine/cpuvar.h>
50 #endif
51 
52 #define _COMPONENT	  ACPI_BUS_COMPONENT
53 ACPI_MODULE_NAME	  ("acpi_cpu")
54 
55 static int		  acpicpu_match(device_t, cfdata_t, void *);
56 static void		  acpicpu_attach(device_t, device_t, void *);
57 static int		  acpicpu_detach(device_t, int);
58 static int		  acpicpu_once_attach(void);
59 static int		  acpicpu_once_detach(void);
60 static void		  acpicpu_start(device_t);
61 
62 static ACPI_STATUS	  acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
63 static uint32_t		  acpicpu_cap(struct acpicpu_softc *);
64 static ACPI_STATUS	  acpicpu_cap_osc(struct acpicpu_softc *,
65 					  uint32_t, uint32_t *);
66 static void		  acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
67 static bool		  acpicpu_suspend(device_t, const pmf_qual_t *);
68 static bool		  acpicpu_resume(device_t, const pmf_qual_t *);
69 static void		  acpicpu_evcnt_attach(device_t);
70 static void		  acpicpu_evcnt_detach(device_t);
71 static void		  acpicpu_debug_print(device_t);
72 static const char	 *acpicpu_debug_print_method_c(uint8_t);
73 static const char	 *acpicpu_debug_print_method_pt(uint8_t);
74 static const char	 *acpicpu_debug_print_dep(uint32_t);
75 
76 static uint32_t		  acpicpu_count = 0;
77 struct acpicpu_softc	**acpicpu_sc = NULL;
78 static bool		  acpicpu_dynamic = true;
79 static bool		  acpicpu_passive = true;
80 
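/*
 * Quirk table: acpicpu_match() declines to match a system when the
 * DMI board vendor, product, and version strings all match an entry
 * in this table.
 */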
81 static const struct {
82 	const char	 *manu;
83 	const char	 *prod;
84 	const char	 *vers;
85 } acpicpu_quirks[] = {
86 	{ "Supermicro", "PDSMi-LN4", "0123456789" },
87 	{ "ASUSTeK Computer INC.", "M2A-MX", "Rev 1.xx" },
88 };
89 
90 CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
91     acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);
92 
93 static int
94 acpicpu_match(device_t parent, cfdata_t match, void *aux)
95 {
96 	const char *manu, *prod, *vers;
97 	struct cpu_info *ci;
98 	size_t i;
99 
100 	if (acpi_softc == NULL)
101 		return 0;
102 
103 	manu = pmf_get_platform("board-vendor");
104 	prod = pmf_get_platform("board-product");
105 	vers = pmf_get_platform("board-version");
106 
107 	if (manu != NULL && prod != NULL && vers != NULL) {
108 
109 		for (i = 0; i < __arraycount(acpicpu_quirks); i++) {
110 
111 			if (strcasecmp(acpicpu_quirks[i].manu, manu) == 0 &&
112 			    strcasecmp(acpicpu_quirks[i].prod, prod) == 0 &&
113 			    strcasecmp(acpicpu_quirks[i].vers, vers) == 0)
114 				return 0;
115 		}
116 	}
117 
118 	ci = acpicpu_md_match(parent, match, aux);
119 
120 	if (ci == NULL)
121 		return 0;
122 
123 	if (acpi_match_cpu_info(ci) == NULL)
124 		return 0;
125 
126 	return 10;
127 }
128 
129 static void
130 acpicpu_attach(device_t parent, device_t self, void *aux)
131 {
132 	struct acpicpu_softc *sc = device_private(self);
133 	struct cpu_info *ci;
134 	ACPI_HANDLE hdl;
135 	cpuid_t id;
136 	int rv;
137 
138 	ci = acpicpu_md_attach(parent, self, aux);
139 
140 	if (ci == NULL)
141 		return;
142 
143 	sc->sc_ci = ci;
144 	sc->sc_dev = self;
145 	sc->sc_cold = true;
146 
147 	hdl = acpi_match_cpu_info(ci);
148 
149 	if (hdl == NULL) {
150 		aprint_normal(": failed to match processor\n");
151 		return;
152 	}
153 
154 	sc->sc_node = acpi_match_node(hdl);
155 
156 	if (acpicpu_once_attach() != 0) {
157 		aprint_normal(": failed to initialize\n");
158 		return;
159 	}
160 
161 	KASSERT(acpi_softc != NULL);
162 	KASSERT(acpicpu_sc != NULL);
163 	KASSERT(sc->sc_node != NULL);
164 
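	/*
	 * Per-CPU state lives in the global acpicpu_sc[] array,
	 * indexed by the ACPI processor ID.
	 */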
165 	id = sc->sc_ci->ci_acpiid;
166 
167 	if (acpicpu_sc[id] != NULL) {
168 		aprint_normal(": already attached\n");
169 		return;
170 	}
171 
172 	aprint_naive("\n");
173 	aprint_normal(": ACPI CPU\n");
174 
175 	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);
176 
177 	if (ACPI_FAILURE(rv) && rv != AE_TYPE)
178 		aprint_verbose_dev(self, "failed to obtain CPU object\n");
179 
180 	acpicpu_count++;
181 	acpicpu_sc[id] = sc;
182 
183 	sc->sc_cap = acpicpu_cap(sc);
184 	sc->sc_ncpus = acpi_md_ncpus();
185 	sc->sc_flags = acpicpu_md_flags();
186 
187 	KASSERT(acpicpu_count <= sc->sc_ncpus);
188 	KASSERT(sc->sc_node->ad_device == NULL);
189 
190 	sc->sc_node->ad_device = self;
191 	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);
192 
193 #if defined(__i386__) || defined(__x86_64__)
194 	acpicpu_cstate_attach(self);
195 #endif
196 	acpicpu_pstate_attach(self);
197 	acpicpu_tstate_attach(self);
198 
199 	acpicpu_debug_print(self);
200 	acpicpu_evcnt_attach(self);
201 
202 	(void)config_interrupts(self, acpicpu_start);
203 	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
204 	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
205 }
206 
207 static int
208 acpicpu_detach(device_t self, int flags)
209 {
210 	struct acpicpu_softc *sc = device_private(self);
211 
212 	sc->sc_cold = true;
213 
214 	acpicpu_evcnt_detach(self);
215 	acpi_deregister_notify(sc->sc_node);
216 
217 	acpicpu_cstate_detach(self);
218 	acpicpu_pstate_detach(self);
219 	acpicpu_tstate_detach(self);
220 
221 	mutex_destroy(&sc->sc_mtx);
222 	sc->sc_node->ad_device = NULL;
223 
224 	acpicpu_count--;
225 	acpicpu_once_detach();
226 
227 	return 0;
228 }
229 
230 static int
231 acpicpu_once_attach(void)
232 {
233 	struct acpicpu_softc *sc;
234 	unsigned int i;
235 
236 	if (acpicpu_count != 0)
237 		return 0;
238 
239 	KASSERT(acpicpu_sc == NULL);
240 
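	/* The first CPU to attach allocates the global softc table. */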
241 	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);
242 
243 	for (i = 0; i < maxcpus; i++)
244 		acpicpu_sc[i] = NULL;
245 
246 	return 0;
247 }
248 
249 static int
250 acpicpu_once_detach(void)
251 {
252 	struct acpicpu_softc *sc;
253 
254 	if (acpicpu_count != 0)
255 		return EDEADLK;
256 
257 	cpufreq_deregister();
258 
259 	if (acpicpu_sc != NULL)
260 		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));
261 
262 	return 0;
263 }
264 
265 static void
266 acpicpu_start(device_t self)
267 {
268 	struct acpicpu_softc *sc = device_private(self);
269 	static uint32_t count = 0;
270 	struct cpufreq cf;
271 	uint32_t i;
272 
273 	/*
274 	 * Run the state-specific initialization routines. These
275 	 * must run only once, after interrupts have been enabled,
276 	 * all CPUs are running, and all ACPI CPUs have attached.
277 	 */
278 	if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) {
279 		sc->sc_cold = false;
280 		return;
281 	}
282 
283 	/*
284 	 * Set the last ACPI CPU as non-cold
285 	 * only after C-states are enabled.
286 	 */
287 	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
288 		acpicpu_cstate_start(self);
289 
290 	sc->sc_cold = false;
291 
292 	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
293 		acpicpu_pstate_start(self);
294 
295 	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
296 		acpicpu_tstate_start(self);
297 
298 	aprint_debug_dev(self, "ACPI CPUs started\n");
299 
300 	/*
301 	 * Register with cpufreq(9).
302 	 */
303 	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0) {
304 
305 		(void)memset(&cf, 0, sizeof(struct cpufreq));
306 
307 		cf.cf_mp = false;
308 		cf.cf_cookie = NULL;
309 		cf.cf_get_freq = acpicpu_pstate_get;
310 		cf.cf_set_freq = acpicpu_pstate_set;
311 		cf.cf_state_count = sc->sc_pstate_count;
312 
313 		(void)strlcpy(cf.cf_name, "acpicpu", sizeof(cf.cf_name));
314 
315 		for (i = 0; i < sc->sc_pstate_count; i++) {
316 
317 			if (sc->sc_pstate[i].ps_freq == 0)
318 				continue;
319 
320 			cf.cf_state[i].cfs_freq = sc->sc_pstate[i].ps_freq;
321 			cf.cf_state[i].cfs_power = sc->sc_pstate[i].ps_power;
322 		}
323 
324 		if (cpufreq_register(&cf) != 0)
325 			aprint_error_dev(self, "failed to register cpufreq\n");
326 	}
327 }
328 
329 SYSCTL_SETUP(acpicpu_sysctl, "acpi_cpu sysctls")
330 {
331 	const struct sysctlnode *node;
332 	int err;
333 
334 	err = sysctl_createv(clog, 0, NULL, &node,
335 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
336 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
337 
338 	if (err != 0)
339 		goto fail;
340 
341 	err = sysctl_createv(clog, 0, &node, &node,
342 	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
343 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
344 
345 	if (err != 0)
346 		goto fail;
347 
348 	err = sysctl_createv(clog, 0, &node, NULL,
349 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
350 	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
351 	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);
352 
353 	if (err != 0)
354 		goto fail;
355 
356 	err = sysctl_createv(clog, 0, &node, NULL,
357 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
358 	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
359 	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);
360 
361 	if (err != 0)
362 		goto fail;
363 
364 	return;
365 
366 fail:
367 	aprint_error("%s: failed to init sysctl (err %d)\n", __func__, err);
368 }
369 
370 static ACPI_STATUS
371 acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
372 {
373 	ACPI_OBJECT_TYPE typ;
374 	ACPI_OBJECT *obj;
375 	ACPI_BUFFER buf;
376 	ACPI_STATUS rv;
377 
378 	rv = AcpiGetType(hdl, &typ);
379 
380 	if (ACPI_FAILURE(rv) || typ != ACPI_TYPE_PROCESSOR)
381 		return AE_TYPE;
382 
383 	rv = acpi_eval_struct(hdl, NULL, &buf);
384 
385 	if (ACPI_FAILURE(rv))
386 		goto out;
387 
388 	obj = buf.Pointer;
389 
390 	if (obj->Type != ACPI_TYPE_PROCESSOR) {
391 		rv = AE_TYPE;
392 		goto out;
393 	}
394 
395 	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
396 		rv = AE_LIMIT;
397 		goto out;
398 	}
399 
400 	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);
401 
402 	if (ao != NULL) {
403 		ao->ao_procid = obj->Processor.ProcId;
404 		ao->ao_pblklen = obj->Processor.PblkLength;
405 		ao->ao_pblkaddr = obj->Processor.PblkAddress;
406 	}
407 
408 out:
409 	if (buf.Pointer != NULL)
410 		ACPI_FREE(buf.Pointer);
411 
412 	return rv;
413 }
414 
415 static uint32_t
416 acpicpu_cap(struct acpicpu_softc *sc)
417 {
418 	uint32_t flags, cap = 0;
419 	ACPI_STATUS rv;
420 
421 	/*
422 	 * Query and set machine-dependent capabilities.
423 	 * Note that the Intel-specific _PDC method has
424 	 * already been evaluated. It was furthermore
425 	 * deprecated in the ACPI 3.0 in favor of _OSC.
426 	 * deprecated in ACPI 3.0 in favor of _OSC.
427 	flags = acpi_md_pdc();
428 	rv = acpicpu_cap_osc(sc, flags, &cap);
429 
430 	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
431 
432 		aprint_error_dev(sc->sc_dev, "failed to evaluate "
433 		    "_OSC: %s\n", AcpiFormatException(rv));
434 	}
435 
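	/*
	 * If _OSC was absent or granted nothing, fall back
	 * to the _PDC-derived flags.
	 */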
436 	return (cap != 0) ? cap : flags;
437 }
438 
439 static ACPI_STATUS
440 acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
441 {
442 	ACPI_OBJECT_LIST arg;
443 	ACPI_OBJECT obj[4];
444 	ACPI_OBJECT *osc;
445 	ACPI_BUFFER buf;
446 	ACPI_STATUS rv;
447 	uint32_t cap[2];
448 	uint32_t *ptr;
449 	int i = 5;
450 
451 	static uint8_t intel_uuid[16] = {
452 		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
453 		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
454 	};
455 
456 	cap[0] = ACPI_OSC_QUERY;
457 	cap[1] = flags;
458 
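	/*
	 * _OSC takes four arguments: the UUID, a revision ID, the number
	 * of DWORDs in the capabilities buffer, and the buffer itself.
	 * The first DWORD carries the query/status bits and the second
	 * the capability bits being negotiated.
	 */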
459 again:
460 	arg.Count = 4;
461 	arg.Pointer = obj;
462 
463 	obj[0].Type = ACPI_TYPE_BUFFER;
464 	obj[0].Buffer.Length = sizeof(intel_uuid);
465 	obj[0].Buffer.Pointer = intel_uuid;
466 
467 	obj[1].Type = ACPI_TYPE_INTEGER;
468 	obj[1].Integer.Value = ACPICPU_PDC_REVID;
469 
470 	obj[2].Type = ACPI_TYPE_INTEGER;
471 	obj[2].Integer.Value = __arraycount(cap);
472 
473 	obj[3].Type = ACPI_TYPE_BUFFER;
474 	obj[3].Buffer.Length = sizeof(cap);
475 	obj[3].Buffer.Pointer = (void *)cap;
476 
477 	buf.Pointer = NULL;
478 	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;
479 
480 	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);
481 
482 	if (ACPI_FAILURE(rv))
483 		goto out;
484 
485 	osc = buf.Pointer;
486 
487 	if (osc->Type != ACPI_TYPE_BUFFER) {
488 		rv = AE_TYPE;
489 		goto out;
490 	}
491 
492 	if (osc->Buffer.Length != sizeof(cap)) {
493 		rv = AE_BUFFER_OVERFLOW;
494 		goto out;
495 	}
496 
497 	ptr = (uint32_t *)osc->Buffer.Pointer;
498 
499 	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
500 		rv = AE_ERROR;
501 		goto out;
502 	}
503 
504 	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
505 		rv = AE_BAD_PARAMETER;
506 		goto out;
507 	}
508 
509 	/*
510 	 * "It is strongly recommended that the OS evaluate
511 	 *  _OSC with the Query Support Flag set until _OSC
512 	 *  returns the Capabilities Masked bit clear, to
513 	 *  negotiate the set of features to be granted to
514 	 *  the OS for native support (ACPI 4.0, 6.2.10)."
515 	 */
516 	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {
517 
518 		ACPI_FREE(buf.Pointer);
519 		i--;
520 
521 		goto again;
522 	}
523 
524 	if ((cap[0] & ACPI_OSC_QUERY) != 0) {
525 
526 		ACPI_FREE(buf.Pointer);
527 		cap[0] &= ~ACPI_OSC_QUERY;
528 
529 		goto again;
530 	}
531 
532 	/*
533 	 * It is permitted for _OSC to return all
534 	 * bits cleared, but this is specified to
535 	 * vary on a per-device basis. Assume that
536 	 * everything rather than nothing will be
537 	 * supported in this case; we do not need
538 	 * the firmware to know the CPU features.
539 	 */
540 	*val = (ptr[1] != 0) ? ptr[1] : cap[1];
541 
542 out:
543 	if (buf.Pointer != NULL)
544 		ACPI_FREE(buf.Pointer);
545 
546 	return rv;
547 }
548 
549 static void
550 acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
551 {
552 	ACPI_OSD_EXEC_CALLBACK func;
553 	struct acpicpu_softc *sc;
554 	device_t self = aux;
555 
556 	sc = device_private(self);
557 
558 	if (sc->sc_cold != false)
559 		return;
560 
561 	if (acpicpu_dynamic != true)
562 		return;
563 
564 	switch (evt) {
565 
566 	case ACPICPU_C_NOTIFY:
567 
568 		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
569 			return;
570 
571 		func = acpicpu_cstate_callback;
572 		break;
573 
574 	case ACPICPU_P_NOTIFY:
575 
576 		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
577 			return;
578 
579 		func = acpicpu_pstate_callback;
580 		break;
581 
582 	case ACPICPU_T_NOTIFY:
583 
584 		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
585 			return;
586 
587 		func = acpicpu_tstate_callback;
588 		break;
589 
590 	default:
591 		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
592 		return;
593 	}
594 
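	/* Defer the actual work; AcpiOsExecute() runs the callback asynchronously. */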
595 	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
596 }
597 
598 static bool
599 acpicpu_suspend(device_t self, const pmf_qual_t *qual)
600 {
601 	struct acpicpu_softc *sc = device_private(self);
602 
603 	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
604 		(void)acpicpu_cstate_suspend(self);
605 
606 	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
607 		(void)acpicpu_pstate_suspend(self);
608 
609 	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
610 		(void)acpicpu_tstate_suspend(self);
611 
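	/* Mark the CPU cold so that notifications are ignored until resume. */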
612 	sc->sc_cold = true;
613 
614 	return true;
615 }
616 
617 static bool
618 acpicpu_resume(device_t self, const pmf_qual_t *qual)
619 {
620 	struct acpicpu_softc *sc = device_private(self);
621 	static const int handler = OSL_NOTIFY_HANDLER;
622 
623 	sc->sc_cold = false;
624 
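	/* Queue the state-specific resume routines for asynchronous execution. */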
625 	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
626 		(void)AcpiOsExecute(handler, acpicpu_cstate_resume, self);
627 
628 	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
629 		(void)AcpiOsExecute(handler, acpicpu_pstate_resume, self);
630 
631 	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
632 		(void)AcpiOsExecute(handler, acpicpu_tstate_resume, self);
633 
634 	return true;
635 }
636 
637 static void
638 acpicpu_evcnt_attach(device_t self)
639 {
640 	struct acpicpu_softc *sc = device_private(self);
641 	struct acpicpu_cstate *cs;
642 	struct acpicpu_pstate *ps;
643 	struct acpicpu_tstate *ts;
644 	const char *str;
645 	uint32_t i;
646 
647 	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
648 
649 		cs = &sc->sc_cstate[i];
650 
651 		if (cs->cs_method == 0)
652 			continue;
653 
654 		str = "HALT";
655 
656 		if (cs->cs_method == ACPICPU_C_STATE_FFH)
657 			str = "MWAIT";
658 
659 		if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
660 			str = "I/O";
661 
662 		(void)snprintf(cs->cs_name, sizeof(cs->cs_name),
663 		    "C%d (%s)", i, str);
664 
665 		evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
666 		    NULL, device_xname(sc->sc_dev), cs->cs_name);
667 	}
668 
669 	for (i = 0; i < sc->sc_pstate_count; i++) {
670 
671 		ps = &sc->sc_pstate[i];
672 
673 		if (ps->ps_freq == 0)
674 			continue;
675 
676 		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
677 		    "P%u (%u MHz)", i, ps->ps_freq);
678 
679 		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
680 		    NULL, device_xname(sc->sc_dev), ps->ps_name);
681 	}
682 
683 	for (i = 0; i < sc->sc_tstate_count; i++) {
684 
685 		ts = &sc->sc_tstate[i];
686 
687 		if (ts->ts_percent == 0)
688 			continue;
689 
690 		(void)snprintf(ts->ts_name, sizeof(ts->ts_name),
691 		    "T%u (%u %%)", i, ts->ts_percent);
692 
693 		evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC,
694 		    NULL, device_xname(sc->sc_dev), ts->ts_name);
695 	}
696 }
697 
698 static void
699 acpicpu_evcnt_detach(device_t self)
700 {
701 	struct acpicpu_softc *sc = device_private(self);
702 	struct acpicpu_cstate *cs;
703 	struct acpicpu_pstate *ps;
704 	struct acpicpu_tstate *ts;
705 	uint32_t i;
706 
707 	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
708 
709 		cs = &sc->sc_cstate[i];
710 
711 		if (cs->cs_method != 0)
712 			evcnt_detach(&cs->cs_evcnt);
713 	}
714 
715 	for (i = 0; i < sc->sc_pstate_count; i++) {
716 
717 		ps = &sc->sc_pstate[i];
718 
719 		if (ps->ps_freq != 0)
720 			evcnt_detach(&ps->ps_evcnt);
721 	}
722 
723 	for (i = 0; i < sc->sc_tstate_count; i++) {
724 
725 		ts = &sc->sc_tstate[i];
726 
727 		if (ts->ts_percent != 0)
728 			evcnt_detach(&ts->ts_evcnt);
729 	}
730 }
731 
732 static void
733 acpicpu_debug_print(device_t self)
734 {
735 	struct acpicpu_softc *sc = device_private(self);
736 	struct cpu_info *ci = sc->sc_ci;
737 	struct acpicpu_cstate *cs;
738 	struct acpicpu_pstate *ps;
739 	struct acpicpu_tstate *ts;
740 	static bool once = false;
741 	struct acpicpu_dep *dep;
742 	uint32_t i, method;
743 
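	/* The state tables are printed only once, for the first CPU. */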
744 	if (once != true) {
745 
746 		for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
747 
748 			cs = &sc->sc_cstate[i];
749 
750 			if (cs->cs_method == 0)
751 				continue;
752 
753 			aprint_verbose_dev(sc->sc_dev, "C%d: %3s, "
754 			    "lat %3u us, pow %5u mW%s\n", i,
755 			    acpicpu_debug_print_method_c(cs->cs_method),
756 			    cs->cs_latency, cs->cs_power,
757 			    (cs->cs_flags != 0) ? ", bus master check" : "");
758 		}
759 
760 		method = sc->sc_pstate_control.reg_spaceid;
761 
762 		for (i = 0; i < sc->sc_pstate_count; i++) {
763 
764 			ps = &sc->sc_pstate[i];
765 
766 			if (ps->ps_freq == 0)
767 				continue;
768 
769 			aprint_verbose_dev(sc->sc_dev, "P%d: %3s, "
770 			    "lat %3u us, pow %5u mW, %4u MHz%s\n", i,
771 			    acpicpu_debug_print_method_pt(method),
772 			    ps->ps_latency, ps->ps_power, ps->ps_freq,
773 			    (ps->ps_flags & ACPICPU_FLAG_P_TURBO) != 0 ?
774 			    ", turbo boost" : "");
775 		}
776 
777 		method = sc->sc_tstate_control.reg_spaceid;
778 
779 		for (i = 0; i < sc->sc_tstate_count; i++) {
780 
781 			ts = &sc->sc_tstate[i];
782 
783 			if (ts->ts_percent == 0)
784 				continue;
785 
786 			aprint_verbose_dev(sc->sc_dev, "T%u: %3s, "
787 			    "lat %3u us, pow %5u mW, %3u %%\n", i,
788 			    acpicpu_debug_print_method_pt(method),
789 			    ts->ts_latency, ts->ts_power, ts->ts_percent);
790 		}
791 
792 		once = true;
793 	}
794 
795 	aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, "
796 	    "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid,
797 	    (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags);
798 
799 	if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) {
800 
801 		dep = &sc->sc_cstate_dep;
802 
803 		aprint_debug_dev(sc->sc_dev, "C-state coordination: "
804 		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
805 		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
806 	}
807 
808 	if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) {
809 
810 		dep = &sc->sc_pstate_dep;
811 
812 		aprint_debug_dev(sc->sc_dev, "P-state coordination: "
813 		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
814 		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
815 	}
816 
817 	if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) {
818 
819 		dep = &sc->sc_tstate_dep;
820 
821 		aprint_debug_dev(sc->sc_dev, "T-state coordination: "
822 		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
823 		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
824 	}
825 }
826 
827 static const char *
828 acpicpu_debug_print_method_c(uint8_t val)
829 {
830 
831 	if (val == ACPICPU_C_STATE_FFH)
832 		return "FFH";
833 
834 	if (val == ACPICPU_C_STATE_HALT)
835 		return "HLT";
836 
837 	if (val == ACPICPU_C_STATE_SYSIO)
838 		return "I/O";
839 
840 	return "???";
841 }
842 
843 static const char *
844 acpicpu_debug_print_method_pt(uint8_t val)
845 {
846 	if (val == ACPI_ADR_SPACE_SYSTEM_MEMORY)
847 		return "MMIO";
848 
849 	if (val == ACPI_ADR_SPACE_SYSTEM_IO)
850 		return "I/O";
851 
852 	if (val == ACPI_ADR_SPACE_FIXED_HARDWARE)
853 		return "FFH";
854 
855 	return "???";
856 }
857 
858 static const char *
859 acpicpu_debug_print_dep(uint32_t val)
860 {
861 
862 	switch (val) {
863 
864 	case ACPICPU_DEP_SW_ALL:
865 		return "SW_ALL";
866 
867 	case ACPICPU_DEP_SW_ANY:
868 		return "SW_ANY";
869 
870 	case ACPICPU_DEP_HW_ALL:
871 		return "HW_ALL";
872 
873 	default:
874 		return "unknown";
875 	}
876 }
877 
878 MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);
879 
880 #ifdef _MODULE
881 #include "ioconf.c"
882 #endif
883 
884 static int
885 acpicpu_modcmd(modcmd_t cmd, void *aux)
886 {
887 	int rv = 0;
888 
889 	switch (cmd) {
890 
891 	case MODULE_CMD_INIT:
892 
893 #ifdef _MODULE
894 		rv = config_init_component(cfdriver_ioconf_acpicpu,
895 		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
896 #endif
897 		break;
898 
899 	case MODULE_CMD_FINI:
900 
901 #ifdef _MODULE
902 		rv = config_fini_component(cfdriver_ioconf_acpicpu,
903 		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
904 #endif
905 		break;
906 
907 	default:
908 		rv = ENOTTY;
909 	}
910 
911 	return rv;
912 }
913