/* $NetBSD: acpi_cpu.c,v 1.52 2020/03/16 21:20:09 pgoyette Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.52 2020/03/16 21:20:09 pgoyette Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/cpufreq.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#include <machine/acpi_machdep.h>
#include <machine/cpuvar.h>

#define _COMPONENT	  ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	  ("acpi_cpu")

static int		  acpicpu_match(device_t, cfdata_t, void *);
static void		  acpicpu_attach(device_t, device_t, void *);
static int		  acpicpu_detach(device_t, int);
static int		  acpicpu_once_attach(void);
static int		  acpicpu_once_detach(void);
static void		  acpicpu_start(device_t);

static ACPI_STATUS	  acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static uint32_t		  acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	  acpicpu_cap_osc(struct acpicpu_softc *,
					  uint32_t, uint32_t *);
static void		  acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		  acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		  acpicpu_resume(device_t, const pmf_qual_t *);
static void		  acpicpu_evcnt_attach(device_t);
static void		  acpicpu_evcnt_detach(device_t);
static void		  acpicpu_debug_print(device_t);
static const char	 *acpicpu_debug_print_method_c(uint8_t);
static const char	 *acpicpu_debug_print_method_pt(uint8_t);
static const char	 *acpicpu_debug_print_dep(uint32_t);

static uint32_t		  acpicpu_count = 0;
struct acpicpu_softc	**acpicpu_sc = NULL;
static bool		  acpicpu_dynamic = true;
static bool		  acpicpu_passive = true;

static const struct {
	const char	 *manu;
	const char	 *prod;
	const char	 *vers;
} acpicpu_quirks[] = {
	{ "Supermicro", "PDSMi-LN4", "0123456789" },
	{ "ASUSTeK Computer INC.", "M2A-MX", "Rev 1.xx" },
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);

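/*
 * Autoconfiguration match. Decline to attach on boards listed in the
 * quirk table above; otherwise defer to the machine-dependent match
 * code and verify that the CPU has a matching ACPI processor node.
 */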
static int
acpicpu_match(device_t parent, cfdata_t match, void *aux)
{
	const char *manu, *prod, *vers;
	struct cpu_info *ci;
	size_t i;

	if (acpi_softc == NULL)
		return 0;

	manu = pmf_get_platform("board-vendor");
	prod = pmf_get_platform("board-product");
	vers = pmf_get_platform("board-version");

	if (manu != NULL && prod != NULL && vers != NULL) {

		for (i = 0; i < __arraycount(acpicpu_quirks); i++) {

			if (strcasecmp(acpicpu_quirks[i].manu, manu) == 0 &&
			    strcasecmp(acpicpu_quirks[i].prod, prod) == 0 &&
			    strcasecmp(acpicpu_quirks[i].vers, vers) == 0)
				return 0;
		}
	}

	ci = acpicpu_md_match(parent, match, aux);

	if (ci == NULL)
		return 0;

	if (acpi_match_cpu_info(ci) == NULL)
		return 0;

	return 10;
}

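/*
 * Attach an ACPI CPU: bind the machine-dependent cpu_info to its ACPI
 * node, reserve a slot in the global acpicpu_sc[] array (indexed by the
 * ACPI CPU id), and attach the C-, P-, and T-state support. The
 * state-specific startup is deferred to acpicpu_start() via
 * config_interrupts(9).
 */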
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci;
	ACPI_HANDLE hdl;
	cpuid_t id;
	int rv;

	ci = acpicpu_md_attach(parent, self, aux);

	if (ci == NULL)
		return;

	sc->sc_ci = ci;
	sc->sc_dev = self;
	sc->sc_cold = true;

	hdl = acpi_match_cpu_info(ci);

	if (hdl == NULL) {
		aprint_normal(": failed to match processor\n");
		return;
	}

	sc->sc_node = acpi_match_node(hdl);

	if (acpicpu_once_attach() != 0) {
		aprint_normal(": failed to initialize\n");
		return;
	}

	KASSERT(acpi_softc != NULL);
	KASSERT(acpicpu_sc != NULL);
	KASSERT(sc->sc_node != NULL);

	id = sc->sc_ci->ci_acpiid;

	if (acpicpu_sc[id] != NULL) {
		aprint_normal(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);

	if (ACPI_FAILURE(rv))
		aprint_verbose_dev(self, "failed to obtain CPU object\n");

	acpicpu_count++;
	acpicpu_sc[id] = sc;

	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_ncpus = acpi_md_ncpus();
	sc->sc_flags = acpicpu_md_flags();

	KASSERT(acpicpu_count <= sc->sc_ncpus);
	KASSERT(sc->sc_node->ad_device == NULL);

	sc->sc_node->ad_device = self;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	acpicpu_debug_print(self);
	acpicpu_evcnt_attach(self);

	(void)config_interrupts(self, acpicpu_start);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}

static int
acpicpu_detach(device_t self, int flags)
{
	struct acpicpu_softc *sc = device_private(self);

	sc->sc_cold = true;

	acpicpu_evcnt_detach(self);
	acpi_deregister_notify(sc->sc_node);

	acpicpu_cstate_detach(self);
	acpicpu_pstate_detach(self);
	acpicpu_tstate_detach(self);

	mutex_destroy(&sc->sc_mtx);
	sc->sc_node->ad_device = NULL;

	acpicpu_count--;
	acpicpu_once_detach();

	return 0;
}

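/*
 * Driver-global state: allocate the array of per-CPU softc pointers,
 * sized by maxcpus, when the first ACPI CPU attaches. The array is
 * released in acpicpu_once_detach() once the last CPU has detached.
 */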
static int
acpicpu_once_attach(void)
{
	struct acpicpu_softc *sc;
	unsigned int i;

	if (acpicpu_count != 0)
		return 0;

	KASSERT(acpicpu_sc == NULL);

	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);

	for (i = 0; i < maxcpus; i++)
		acpicpu_sc[i] = NULL;

	return 0;
}

static int
acpicpu_once_detach(void)
{
	struct acpicpu_softc *sc;

	if (acpicpu_count != 0)
		return EDEADLK;

	cpufreq_deregister();

	if (acpicpu_sc != NULL)
		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));

	return 0;
}

static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static uint32_t count = 0;
	struct cpufreq cf;
	uint32_t i;

	/*
	 * Run the state-specific initialization routines. These
	 * must run only once, after interrupts have been enabled,
	 * all CPUs are running, and all ACPI CPUs have attached.
	 */
	if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) {
		sc->sc_cold = false;
		return;
	}

	/*
	 * Set the last ACPI CPU as non-cold
	 * only after C-states are enabled.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	aprint_debug_dev(self, "ACPI CPUs started\n");

	/*
	 * Register with cpufreq(9).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0) {

		(void)memset(&cf, 0, sizeof(struct cpufreq));

		cf.cf_mp = false;
		cf.cf_cookie = NULL;
		cf.cf_get_freq = acpicpu_pstate_get;
		cf.cf_set_freq = acpicpu_pstate_set;
		cf.cf_state_count = sc->sc_pstate_count;

		(void)strlcpy(cf.cf_name, "acpicpu", sizeof(cf.cf_name));

		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			cf.cf_state[i].cfs_freq = sc->sc_pstate[i].ps_freq;
			cf.cf_state[i].cfs_power = sc->sc_pstate[i].ps_power;
		}

		if (cpufreq_register(&cf) != 0)
			aprint_error_dev(self, "failed to register cpufreq\n");
	}
}

SYSCTL_SETUP(acpicpu_sysctl, "acpi_cpu sysctls")
{
	const struct sysctlnode *node;
	int err;

	err = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(clog, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	return;

fail:
	aprint_error("%s: failed to init sysctl (err %d)\n", __func__, err);
}

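/*
 * Evaluate the Processor object behind the given handle and record the
 * ACPI processor id and the address and length of the optional
 * processor control block (P_BLK).
 */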
static ACPI_STATUS
acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
{
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	rv = acpi_eval_struct(hdl, NULL, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PROCESSOR) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
		rv = AE_LIMIT;
		goto out;
	}

	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);

	if (ao != NULL) {
		ao->ao_procid = obj->Processor.ProcId;
		ao->ao_pblklen = obj->Processor.PblkLength;
		ao->ao_pblkaddr = obj->Processor.PblkAddress;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static uint32_t
acpicpu_cap(struct acpicpu_softc *sc)
{
	uint32_t flags, cap = 0;
	ACPI_STATUS rv;

	/*
	 * Query and set machine-dependent capabilities.
	 * Note that the Intel-specific _PDC method has
	 * already been evaluated. It was furthermore
	 * deprecated in ACPI 3.0 in favor of _OSC.
	 */
	flags = acpi_md_pdc();
	rv = acpicpu_cap_osc(sc, flags, &cap);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {

		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "_OSC: %s\n", AcpiFormatException(rv));
	}

	return (cap != 0) ? cap : flags;
}

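/*
 * Negotiate capabilities with the firmware by evaluating _OSC with the
 * Intel-defined processor UUID. Per the ACPI specification, _OSC is
 * first evaluated with the Query Support Flag set, retrying while the
 * firmware masks capabilities, and then evaluated once more without
 * the flag to commit the negotiated set.
 */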
static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;

	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 *  _OSC with the Query Support Flag set until _OSC
	 *  returns the Capabilities Masked bit clear, to
	 *  negotiate the set of features to be granted to
	 *  the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on a per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

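/*
 * Handle ACPI notifications, e.g. a change in the set of available
 * C-, P-, or T-states. The work is dispatched to the state-specific
 * callbacks asynchronously via AcpiOsExecute(). Notifications are
 * ignored while the device is cold or when dynamic state changes have
 * been disabled with the hw.acpi.cpu.dynamic sysctl.
 */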
static void
acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if (sc->sc_cold != false)
		return;

	if (acpicpu_dynamic != true)
		return;

	switch (evt) {

	case ACPICPU_C_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
			return;

		func = acpicpu_cstate_callback;
		break;

	case ACPICPU_P_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
			return;

		func = acpicpu_pstate_callback;
		break;

	case ACPICPU_T_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
			return;

		func = acpicpu_tstate_callback;
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
		return;
	}

	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
}

static bool
acpicpu_suspend(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_suspend(self);

	sc->sc_cold = true;

	return true;
}

static bool
acpicpu_resume(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);
	static const int handler = OSL_NOTIFY_HANDLER;

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)AcpiOsExecute(handler, acpicpu_cstate_resume, self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)AcpiOsExecute(handler, acpicpu_pstate_resume, self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)AcpiOsExecute(handler, acpicpu_tstate_resume, self);

	return true;
}

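/*
 * Attach an event counter (see evcnt(9)) for each detected C-, P-, and
 * T-state, named after the state and its method, frequency, or duty
 * cycle, respectively.
 */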
static void
acpicpu_evcnt_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	const char *str;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method == 0)
			continue;

		str = "HALT";

		if (cs->cs_method == ACPICPU_C_STATE_FFH)
			str = "MWAIT";

		if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
			str = "I/O";

		(void)snprintf(cs->cs_name, sizeof(cs->cs_name),
		    "C%d (%s)", i, str);

		evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), cs->cs_name);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
		    "P%u (%u MHz)", i, ps->ps_freq);

		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ps->ps_name);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent == 0)
			continue;

		(void)snprintf(ts->ts_name, sizeof(ts->ts_name),
		    "T%u (%u %%)", i, ts->ts_percent);

		evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ts->ts_name);
	}
}

static void
acpicpu_evcnt_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method != 0)
			evcnt_detach(&cs->cs_evcnt);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0)
			evcnt_detach(&ps->ps_evcnt);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent != 0)
			evcnt_detach(&ts->ts_evcnt);
	}
}

static void
acpicpu_debug_print(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci = sc->sc_ci;
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	static bool once = false;
	struct acpicpu_dep *dep;
	uint32_t i, method;

	if (once != true) {

		for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

			cs = &sc->sc_cstate[i];

			if (cs->cs_method == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "C%d: %3s, "
			    "lat %3u us, pow %5u mW%s\n", i,
			    acpicpu_debug_print_method_c(cs->cs_method),
			    cs->cs_latency, cs->cs_power,
			    (cs->cs_flags != 0) ? ", bus master check" : "");
		}

		method = sc->sc_pstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_pstate_count; i++) {

			ps = &sc->sc_pstate[i];

			if (ps->ps_freq == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "P%d: %3s, "
			    "lat %3u us, pow %5u mW, %4u MHz%s\n", i,
			    acpicpu_debug_print_method_pt(method),
			    ps->ps_latency, ps->ps_power, ps->ps_freq,
			    (ps->ps_flags & ACPICPU_FLAG_P_TURBO) != 0 ?
			    ", turbo boost" : "");
		}

		method = sc->sc_tstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_tstate_count; i++) {

			ts = &sc->sc_tstate[i];

			if (ts->ts_percent == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "T%u: %3s, "
			    "lat %3u us, pow %5u mW, %3u %%\n", i,
			    acpicpu_debug_print_method_pt(method),
			    ts->ts_latency, ts->ts_power, ts->ts_percent);
		}

		once = true;
	}

	aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, "
	    "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid,
	    (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags);

	if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) {

		dep = &sc->sc_cstate_dep;

		aprint_debug_dev(sc->sc_dev, "C-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) {

		dep = &sc->sc_pstate_dep;

		aprint_debug_dev(sc->sc_dev, "P-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) {

		dep = &sc->sc_tstate_dep;

		aprint_debug_dev(sc->sc_dev, "T-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}
}

static const char *
acpicpu_debug_print_method_c(uint8_t val)
{

	if (val == ACPICPU_C_STATE_FFH)
		return "FFH";

	if (val == ACPICPU_C_STATE_HALT)
		return "HLT";

	if (val == ACPICPU_C_STATE_SYSIO)
		return "I/O";

	return "???";
}

static const char *
acpicpu_debug_print_method_pt(uint8_t val)
{

	if (val == ACPI_ADR_SPACE_SYSTEM_IO)
		return "I/O";

	if (val == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return "FFH";

	return "???";
}

static const char *
acpicpu_debug_print_dep(uint32_t val)
{

	switch (val) {

	case ACPICPU_DEP_SW_ALL:
		return "SW_ALL";

	case ACPICPU_DEP_SW_ANY:
		return "SW_ANY";

	case ACPICPU_DEP_HW_ALL:
		return "HW_ALL";

	default:
		return "unknown";
	}
}

MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
acpicpu_modcmd(modcmd_t cmd, void *aux)
{
	int rv = 0;

	switch (cmd) {

	case MODULE_CMD_INIT:

#ifdef _MODULE
		rv = config_init_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	case MODULE_CMD_FINI:

#ifdef _MODULE
		rv = config_fini_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	default:
		rv = ENOTTY;
	}

	return rv;
}