/* $NetBSD: acpi_cpu.c,v 1.44 2011/06/22 08:49:54 jruoho Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.44 2011/06/22 08:49:54 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#include <machine/acpi_machdep.h>
#include <machine/cpuvar.h>

#define _COMPONENT	  ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	  ("acpi_cpu")

static int		  acpicpu_match(device_t, cfdata_t, void *);
static void		  acpicpu_attach(device_t, device_t, void *);
static int		  acpicpu_detach(device_t, int);
static int		  acpicpu_once_attach(void);
static int		  acpicpu_once_detach(void);
static void		  acpicpu_start(device_t);
static void		  acpicpu_sysctl(device_t);

static ACPI_STATUS	  acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static uint32_t		  acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	  acpicpu_cap_osc(struct acpicpu_softc *,
					  uint32_t, uint32_t *);
static void		  acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		  acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		  acpicpu_resume(device_t, const pmf_qual_t *);
static void		  acpicpu_evcnt_attach(device_t);
static void		  acpicpu_evcnt_detach(device_t);
static void		  acpicpu_debug_print(device_t);
static const char	 *acpicpu_debug_print_method(uint8_t);
static const char	 *acpicpu_debug_print_dep(uint32_t);

static uint32_t		  acpicpu_count = 0;
struct acpicpu_softc	**acpicpu_sc = NULL;
static struct sysctllog	 *acpicpu_log = NULL;
static bool		  acpicpu_dynamic = true;
static bool		  acpicpu_passive = true;

static const struct {
	const char	 *manu;
	const char	 *prod;
	const char	 *vers;
} acpicpu_quirks[] = {
	{ "Supermicro", "PDSMi-LN4", "0123456789" },
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);

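/*
 * Probe for an ACPI CPU. Refuse to match on systems listed in the
 * quirk table above, and otherwise require both a machine-dependent
 * match and a matching ACPI processor node.
 */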
static int
acpicpu_match(device_t parent, cfdata_t match, void *aux)
{
	const char *manu, *prod, *vers;
	struct cpu_info *ci;
	size_t i;

	if (acpi_softc == NULL)
		return 0;

	manu = pmf_get_platform("system-manufacturer");
	prod = pmf_get_platform("system-product-name");
	vers = pmf_get_platform("system-version");

	if (manu != NULL && prod != NULL && vers != NULL) {

		for (i = 0; i < __arraycount(acpicpu_quirks); i++) {

			if (strcasecmp(acpicpu_quirks[i].manu, manu) == 0 &&
			    strcasecmp(acpicpu_quirks[i].prod, prod) == 0 &&
			    strcasecmp(acpicpu_quirks[i].vers, vers) == 0)
				return 0;
		}
	}

	ci = acpicpu_md_match(parent, match, aux);

	if (ci == NULL)
		return 0;

	if (acpi_match_cpu_info(ci) == NULL)
		return 0;

	return 10;
}

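/*
 * Attach an ACPI CPU. This resolves the ACPI node for the CPU,
 * records the softc in the global per-CPU array, queries the
 * capabilities, and attaches the C-, P-, and T-state code. The
 * states themselves are started later from acpicpu_start(), once
 * interrupts have been enabled.
 */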
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci;
	ACPI_HANDLE hdl;
	cpuid_t id;
	int rv;

	ci = acpicpu_md_attach(parent, self, aux);

	if (ci == NULL)
		return;

	sc->sc_ci = ci;
	sc->sc_dev = self;
	sc->sc_cold = true;

	hdl = acpi_match_cpu_info(ci);

	if (hdl == NULL) {
		aprint_normal(": failed to match processor\n");
		return;
	}

	sc->sc_node = acpi_match_node(hdl);

	if (acpicpu_once_attach() != 0) {
		aprint_normal(": failed to initialize\n");
		return;
	}

	KASSERT(acpi_softc != NULL);
	KASSERT(acpicpu_sc != NULL);
	KASSERT(sc->sc_node != NULL);

	id = sc->sc_ci->ci_acpiid;

	if (acpicpu_sc[id] != NULL) {
		aprint_normal(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);

	if (ACPI_FAILURE(rv))
		aprint_verbose_dev(self, "failed to obtain CPU object\n");

	acpicpu_count++;
	acpicpu_sc[id] = sc;

	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_ncpus = acpi_md_ncpus();
	sc->sc_flags = acpicpu_md_flags();

	KASSERT(acpicpu_count <= sc->sc_ncpus);
	KASSERT(sc->sc_node->ad_device == NULL);

	sc->sc_node->ad_device = self;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	acpicpu_debug_print(self);
	acpicpu_evcnt_attach(self);

	(void)config_interrupts(self, acpicpu_start);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}

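/*
 * Detach an ACPI CPU, undoing the attachment in reverse order.
 */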
static int
acpicpu_detach(device_t self, int flags)
{
	struct acpicpu_softc *sc = device_private(self);

	sc->sc_cold = true;

	acpicpu_evcnt_detach(self);
	acpi_deregister_notify(sc->sc_node);

	acpicpu_cstate_detach(self);
	acpicpu_pstate_detach(self);
	acpicpu_tstate_detach(self);

	mutex_destroy(&sc->sc_mtx);
	sc->sc_node->ad_device = NULL;

	acpicpu_count--;
	acpicpu_once_detach();

	return 0;
}

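/*
 * One-time initialization: allocate the global array that maps
 * ACPI CPU IDs to softc pointers. Only the first CPU to attach
 * does this.
 */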
static int
acpicpu_once_attach(void)
{
	struct acpicpu_softc *sc;
	unsigned int i;

	if (acpicpu_count != 0)
		return 0;

	KASSERT(acpicpu_sc == NULL);
	KASSERT(acpicpu_log == NULL);

	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);

	if (acpicpu_sc == NULL)
		return ENOMEM;

	for (i = 0; i < maxcpus; i++)
		acpicpu_sc[i] = NULL;

	return 0;
}

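/*
 * One-time cleanup: release the global state once the last
 * ACPI CPU has detached.
 */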
static int
acpicpu_once_detach(void)
{
	struct acpicpu_softc *sc;

	if (acpicpu_count != 0)
		return EDEADLK;

	if (acpicpu_log != NULL)
		sysctl_teardown(&acpicpu_log);

	if (acpicpu_sc != NULL)
		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));

	return 0;
}

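/*
 * Start the ACPI CPU state machinery. This is scheduled via
 * config_interrupts(9) for each CPU, but the state-specific
 * start routines run only once, on the last CPU to arrive here.
 */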
static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static uint32_t count = 0;

	/*
	 * Run the state-specific initialization routines. These
	 * must run only once, after interrupts have been enabled,
	 * all CPUs are running, and all ACPI CPUs have attached.
	 */
	if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) {
		sc->sc_cold = false;
		return;
	}

	/*
	 * Set the last ACPI CPU as non-cold
	 * only after C-states are enabled.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	acpicpu_sysctl(self);
	aprint_debug_dev(self, "ACPI CPUs started\n");
}

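/*
 * Create the sysctl subtree (hw.acpi.cpu) with knobs for dynamic
 * state changes and passive cooling.
 */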
static void
acpicpu_sysctl(device_t self)
{
	const struct sysctlnode *node;
	int err;

	KASSERT(acpicpu_log == NULL);

	err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	return;

fail:
	aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
}

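/*
 * Evaluate the ACPI processor object and extract the processor ID
 * and the P_BLK address and length.
 */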
static ACPI_STATUS
acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
{
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	rv = acpi_eval_struct(hdl, NULL, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PROCESSOR) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
		rv = AE_LIMIT;
		goto out;
	}

	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);

	if (ao != NULL) {
		ao->ao_procid = obj->Processor.ProcId;
		ao->ao_pblklen = obj->Processor.PblkLength;
		ao->ao_pblkaddr = obj->Processor.PblkAddress;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

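/*
 * Query the processor capabilities, preferring the _OSC result
 * over the machine-dependent _PDC flags when one is available.
 */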
static uint32_t
acpicpu_cap(struct acpicpu_softc *sc)
{
	uint32_t flags, cap = 0;
	ACPI_STATUS rv;

	/*
	 * Query and set machine-dependent capabilities.
	 * Note that the Intel-specific _PDC method has
	 * already been evaluated; it was deprecated in
	 * ACPI 3.0 in favor of _OSC.
	 */
	flags = acpi_md_pdc();
	rv = acpicpu_cap_osc(sc, flags, &cap);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {

		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "_OSC: %s\n", AcpiFormatException(rv));
	}

	return (cap != 0) ? cap : flags;
}

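/*
 * Negotiate capabilities with _OSC using the Intel vendor UUID.
 * The method is first evaluated with the query flag set, and is
 * re-evaluated until the "capabilities masked" bit is clear, as
 * recommended by the specification; a second pass without the
 * query flag then commits the capabilities.
 */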
static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;

	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 *  _OSC with the Query Support Flag set until _OSC
	 *  returns the Capabilities Masked bit clear, to
	 *  negotiate the set of features to be granted to
	 *  the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on a per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

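/*
 * Notify handler. Dispatch C-, P-, and T-state change notifications
 * to the matching callback via AcpiOsExecute().
 */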
static void
acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if (sc->sc_cold != false)
		return;

	if (acpicpu_dynamic != true)
		return;

	switch (evt) {

	case ACPICPU_C_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
			return;

		func = acpicpu_cstate_callback;
		break;

	case ACPICPU_P_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
			return;

		func = acpicpu_pstate_callback;
		break;

	case ACPICPU_T_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
			return;

		func = acpicpu_tstate_callback;
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
		return;
	}

	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
}

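/*
 * Suspend hook: quiesce the C-, P-, and T-state code and mark the
 * CPU as cold so that notifications are ignored until resume.
 */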
static bool
acpicpu_suspend(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_suspend(self);

	sc->sc_cold = true;

	return true;
}

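/*
 * Resume hook: mark the CPU as non-cold and schedule the C-, P-,
 * and T-state resume callbacks.
 */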
static bool
acpicpu_resume(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);
	static const int handler = OSL_NOTIFY_HANDLER;

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)AcpiOsExecute(handler, acpicpu_cstate_resume, self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)AcpiOsExecute(handler, acpicpu_pstate_resume, self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)AcpiOsExecute(handler, acpicpu_tstate_resume, self);

	return true;
}

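/*
 * Attach an event counter for each valid C-, P-, and T-state.
 */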
static void
acpicpu_evcnt_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	const char *str;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method == 0)
			continue;

		str = "HALT";

		if (cs->cs_method == ACPICPU_C_STATE_FFH)
			str = "MWAIT";

		if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
			str = "I/O";

		(void)snprintf(cs->cs_name, sizeof(cs->cs_name),
		    "C%d (%s)", i, str);

		evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), cs->cs_name);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
		    "P%u (%u MHz)", i, ps->ps_freq);

		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ps->ps_name);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent == 0)
			continue;

		(void)snprintf(ts->ts_name, sizeof(ts->ts_name),
		    "T%u (%u %%)", i, ts->ts_percent);

		evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ts->ts_name);
	}
}

static void
acpicpu_evcnt_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method != 0)
			evcnt_detach(&cs->cs_evcnt);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0)
			evcnt_detach(&ps->ps_evcnt);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent != 0)
			evcnt_detach(&ts->ts_evcnt);
	}
}

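/*
 * Print a verbose summary of the available states and, at debug
 * level, the capabilities, flags, and coordination (dependency)
 * information.
 */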
static void
acpicpu_debug_print(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci = sc->sc_ci;
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	static bool once = false;
	struct acpicpu_dep *dep;
	uint32_t i, method;

	if (once != true) {

		for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

			cs = &sc->sc_cstate[i];

			if (cs->cs_method == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "C%d: %3s, "
			    "lat %3u us, pow %5u mW%s\n", i,
			    acpicpu_debug_print_method(cs->cs_method),
			    cs->cs_latency, cs->cs_power,
			    (cs->cs_flags != 0) ? ", bus master check" : "");
		}

		method = sc->sc_pstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_pstate_count; i++) {

			ps = &sc->sc_pstate[i];

			if (ps->ps_freq == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "P%d: %3s, "
			    "lat %3u us, pow %5u mW, %4u MHz%s\n", i,
			    acpicpu_debug_print_method(method),
			    ps->ps_latency, ps->ps_power, ps->ps_freq,
			    (ps->ps_flags & ACPICPU_FLAG_P_TURBO) != 0 ?
			    ", turbo boost" : "");
		}

		method = sc->sc_tstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_tstate_count; i++) {

			ts = &sc->sc_tstate[i];

			if (ts->ts_percent == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "T%u: %3s, "
			    "lat %3u us, pow %5u mW, %3u %%\n", i,
			    acpicpu_debug_print_method(method),
			    ts->ts_latency, ts->ts_power, ts->ts_percent);
		}

		once = true;
	}

	aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, "
	    "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid,
	    (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags);

	if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) {

		dep = &sc->sc_cstate_dep;

		aprint_debug_dev(sc->sc_dev, "C-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) {

		dep = &sc->sc_pstate_dep;

		aprint_debug_dev(sc->sc_dev, "P-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) {

		dep = &sc->sc_tstate_dep;

		aprint_debug_dev(sc->sc_dev, "T-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}
}

static const char *
acpicpu_debug_print_method(uint8_t val)
{

	if (val == ACPICPU_C_STATE_FFH)
		return "FFH";

	if (val == ACPICPU_C_STATE_HALT)
		return "HLT";

	if (val == ACPICPU_C_STATE_SYSIO)
		return "I/O";

	if (val == ACPI_ADR_SPACE_SYSTEM_IO)
		return "I/O";

	if (val == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return "FFH";

	return "???";
}

static const char *
acpicpu_debug_print_dep(uint32_t val)
{

	switch (val) {

	case ACPICPU_DEP_SW_ALL:
		return "SW_ALL";

	case ACPICPU_DEP_SW_ANY:
		return "SW_ANY";

	case ACPICPU_DEP_HW_ALL:
		return "HW_ALL";

	default:
		return "unknown";
	}
}

MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

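/*
 * Module glue: handle loading and unloading of the driver
 * as a kernel module.
 */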
static int
acpicpu_modcmd(modcmd_t cmd, void *aux)
{
	int rv = 0;

	switch (cmd) {

	case MODULE_CMD_INIT:

#ifdef _MODULE
		rv = config_init_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	case MODULE_CMD_FINI:

#ifdef _MODULE
		rv = config_fini_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	default:
		rv = ENOTTY;
	}

	return rv;
}