xref: /netbsd-src/sys/dev/acpi/acpi_cpu.c (revision 9ddb6ab554e70fb9bbd90c3d96b812bc57755a14)
1 /* $NetBSD: acpi_cpu.c,v 1.48 2011/11/14 02:44:59 jmcneill Exp $ */
2 
3 /*-
4  * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.48 2011/11/14 02:44:59 jmcneill Exp $");
31 
32 #include <sys/param.h>
33 #include <sys/cpu.h>
34 #include <sys/evcnt.h>
35 #include <sys/kernel.h>
36 #include <sys/kmem.h>
37 #include <sys/module.h>
38 #include <sys/mutex.h>
39 #include <sys/sysctl.h>
40 #include <sys/cpufreq.h>
41 
42 #include <dev/acpi/acpireg.h>
43 #include <dev/acpi/acpivar.h>
44 #include <dev/acpi/acpi_cpu.h>
45 
46 #include <machine/acpi_machdep.h>
47 #include <machine/cpuvar.h>
48 
49 #define _COMPONENT	  ACPI_BUS_COMPONENT
50 ACPI_MODULE_NAME	  ("acpi_cpu")
51 
/*
 * Autoconfiguration glue and local prototypes.
 */
static int		  acpicpu_match(device_t, cfdata_t, void *);
static void		  acpicpu_attach(device_t, device_t, void *);
static int		  acpicpu_detach(device_t, int);
static int		  acpicpu_once_attach(void);
static int		  acpicpu_once_detach(void);
static void		  acpicpu_start(device_t);
static void		  acpicpu_sysctl(device_t);

static ACPI_STATUS	  acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static uint32_t		  acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	  acpicpu_cap_osc(struct acpicpu_softc *,
					  uint32_t, uint32_t *);
static void		  acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		  acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		  acpicpu_resume(device_t, const pmf_qual_t *);
static void		  acpicpu_evcnt_attach(device_t);
static void		  acpicpu_evcnt_detach(device_t);
static void		  acpicpu_debug_print(device_t);
static const char	 *acpicpu_debug_print_method_c(uint8_t);
static const char	 *acpicpu_debug_print_method_pt(uint8_t);
static const char	 *acpicpu_debug_print_dep(uint32_t);

/* Number of attached ACPI CPUs; guards the once_attach/once_detach pair. */
static uint32_t		  acpicpu_count = 0;
/* Global table of softcs, indexed by ci_acpiid; shared with sibling files. */
struct acpicpu_softc	**acpicpu_sc = NULL;
static struct sysctllog	 *acpicpu_log = NULL;
/* sysctl-tunable knobs: dynamic state changes and passive cooling. */
static bool		  acpicpu_dynamic = true;
static bool		  acpicpu_passive = true;

/*
 * Machines on which this driver should not attach at all,
 * matched against the DMI vendor/product/version strings.
 */
static const struct {
	const char	 *manu;
	const char	 *prod;
	const char	 *vers;
} acpicpu_quirks[] = {
	{ "Supermicro", "PDSMi-LN4", "0123456789" },
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);
90 
91 static int
92 acpicpu_match(device_t parent, cfdata_t match, void *aux)
93 {
94 	const char *manu, *prod, *vers;
95 	struct cpu_info *ci;
96 	size_t i;
97 
98 	if (acpi_softc == NULL)
99 		return 0;
100 
101 	manu = pmf_get_platform("system-vendor");
102 	prod = pmf_get_platform("system-product");
103 	vers = pmf_get_platform("system-version");
104 
105 	if (manu != NULL && prod != NULL && vers != NULL) {
106 
107 		for (i = 0; i < __arraycount(acpicpu_quirks); i++) {
108 
109 			if (strcasecmp(acpicpu_quirks[i].manu, manu) == 0 &&
110 			    strcasecmp(acpicpu_quirks[i].prod, prod) == 0 &&
111 			    strcasecmp(acpicpu_quirks[i].vers, vers) == 0)
112 				return 0;
113 		}
114 	}
115 
116 	ci = acpicpu_md_match(parent, match, aux);
117 
118 	if (ci == NULL)
119 		return 0;
120 
121 	if (acpi_match_cpu_info(ci) == NULL)
122 		return 0;
123 
124 	return 10;
125 }
126 
/*
 * Autoconfiguration attach: bind the softc to its cpu_info and ACPI
 * node, probe C-, P- and T-state support, and defer state activation
 * to acpicpu_start() once interrupts are enabled.
 */
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci;
	ACPI_HANDLE hdl;
	cpuid_t id;
	int rv;	/* NOTE(review): holds an ACPI_STATUS; int works but is lossy */

	ci = acpicpu_md_attach(parent, self, aux);

	if (ci == NULL)
		return;

	sc->sc_ci = ci;
	sc->sc_dev = self;
	sc->sc_cold = true;	/* cleared later in acpicpu_start() */

	hdl = acpi_match_cpu_info(ci);

	if (hdl == NULL) {
		aprint_normal(": failed to match processor\n");
		return;
	}

	sc->sc_node = acpi_match_node(hdl);

	/* The first CPU to attach allocates the global softc table. */
	if (acpicpu_once_attach() != 0) {
		aprint_normal(": failed to initialize\n");
		return;
	}

	KASSERT(acpi_softc != NULL);
	KASSERT(acpicpu_sc != NULL);
	KASSERT(sc->sc_node != NULL);

	id = sc->sc_ci->ci_acpiid;

	/* Guard against duplicate ACPI processor objects for one CPU. */
	if (acpicpu_sc[id] != NULL) {
		aprint_normal(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	/* Evaluate the legacy ACPI "Processor" object; failure is benign. */
	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);

	if (ACPI_FAILURE(rv))
		aprint_verbose_dev(self, "failed to obtain CPU object\n");

	acpicpu_count++;
	acpicpu_sc[id] = sc;

	sc->sc_cap = acpicpu_cap(sc);	/* capabilities via _PDC/_OSC */
	sc->sc_ncpus = acpi_md_ncpus();
	sc->sc_flags = acpicpu_md_flags();

	KASSERT(acpicpu_count <= sc->sc_ncpus);
	KASSERT(sc->sc_node->ad_device == NULL);

	sc->sc_node->ad_device = self;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	/* Probe per-state support; each sets its ACPICPU_FLAG_* on success. */
	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	acpicpu_debug_print(self);
	acpicpu_evcnt_attach(self);

	/* Start only after interrupts are enabled and all CPUs attached. */
	(void)config_interrupts(self, acpicpu_start);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}
202 
203 static int
204 acpicpu_detach(device_t self, int flags)
205 {
206 	struct acpicpu_softc *sc = device_private(self);
207 
208 	sc->sc_cold = true;
209 
210 	acpicpu_evcnt_detach(self);
211 	acpi_deregister_notify(sc->sc_node);
212 
213 	acpicpu_cstate_detach(self);
214 	acpicpu_pstate_detach(self);
215 	acpicpu_tstate_detach(self);
216 
217 	mutex_destroy(&sc->sc_mtx);
218 	sc->sc_node->ad_device = NULL;
219 
220 	acpicpu_count--;
221 	acpicpu_once_detach();
222 
223 	return 0;
224 }
225 
226 static int
227 acpicpu_once_attach(void)
228 {
229 	struct acpicpu_softc *sc;
230 	unsigned int i;
231 
232 	if (acpicpu_count != 0)
233 		return 0;
234 
235 	KASSERT(acpicpu_sc == NULL);
236 	KASSERT(acpicpu_log == NULL);
237 
238 	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);
239 
240 	if (acpicpu_sc == NULL)
241 		return ENOMEM;
242 
243 	for (i = 0; i < maxcpus; i++)
244 		acpicpu_sc[i] = NULL;
245 
246 	return 0;
247 }
248 
/*
 * One-time teardown, performed when the last CPU detaches: release
 * the cpufreq(9) registration, the sysctl tree, and the softc table.
 *
 * Returns EDEADLK while any CPU is still attached, 0 otherwise.
 */
static int
acpicpu_once_detach(void)
{
	struct acpicpu_softc *sc;

	if (acpicpu_count != 0)
		return EDEADLK;

	cpufreq_deregister();

	if (acpicpu_log != NULL)
		sysctl_teardown(&acpicpu_log);

	/* Size must mirror the kmem_zalloc() in acpicpu_once_attach(). */
	if (acpicpu_sc != NULL)
		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));

	return 0;
}
267 
/*
 * Deferred startup, scheduled via config_interrupts() from attach.
 * Runs once per CPU; the last invocation (when every attached ACPI
 * CPU has been counted) performs the global state activation and
 * registers with cpufreq(9).
 */
static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static uint32_t count = 0;	/* number of invocations so far */
	struct cpufreq cf;
	uint32_t i;

	/*
	 * Run the state-specific initialization routines. These
	 * must run only once, after interrupts have been enabled,
	 * all CPUs are running, and all ACPI CPUs have attached.
	 */
	if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) {
		sc->sc_cold = false;
		return;
	}

	/*
	 * Set the last ACPI CPU as non-cold
	 * only after C-states are enabled.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	acpicpu_sysctl(self);
	aprint_debug_dev(self, "ACPI CPUs started\n");

	/*
	 * Register with cpufreq(9).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0) {

		(void)memset(&cf, 0, sizeof(struct cpufreq));

		cf.cf_mp = false;
		cf.cf_cookie = NULL;
		cf.cf_get_freq = acpicpu_pstate_get;
		cf.cf_set_freq = acpicpu_pstate_set;
		cf.cf_state_count = sc->sc_pstate_count;

		(void)strlcpy(cf.cf_name, "acpicpu", sizeof(cf.cf_name));

		/* Export only P-states with a valid (non-zero) frequency. */
		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			cf.cf_state[i].cfs_freq = sc->sc_pstate[i].ps_freq;
			cf.cf_state[i].cfs_power = sc->sc_pstate[i].ps_power;
		}

		if (cpufreq_register(&cf) != 0)
			aprint_error_dev(self, "failed to register cpufreq\n");
	}
}
332 
/*
 * Create the hw.acpi.cpu sysctl subtree with the "dynamic" and
 * "passive" tunables. Each sysctl_createv() chains off the node
 * created by the previous call, so the order below is significant.
 */
static void
acpicpu_sysctl(device_t self)
{
	const struct sysctlnode *node;
	int err;

	KASSERT(acpicpu_log == NULL);

	/* hw */
	err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL);

	if (err != 0)
		goto fail;

	/* hw.acpi */
	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	/* hw.acpi.cpu */
	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	/* hw.acpi.cpu.dynamic: allow dynamic state notifications. */
	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	/* hw.acpi.cpu.passive: enable passive cooling. */
	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	return;

fail:
	aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
}
383 
/*
 * Evaluate the legacy ACPI "Processor" object behind hdl and copy the
 * processor id and P_BLK address/length into *ao (when non-NULL).
 *
 * Returns AE_OK on success, AE_TYPE if the object is not a processor,
 * AE_LIMIT if the processor id is out of range, or the evaluation error.
 */
static ACPI_STATUS
acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
{
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	/*
	 * NOTE(review): buf is only initialized by acpi_eval_struct();
	 * the "out" path assumes it sets buf.Pointer even on failure --
	 * confirm against acpi_eval_struct()'s contract.
	 */
	rv = acpi_eval_struct(hdl, NULL, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PROCESSOR) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
		rv = AE_LIMIT;
		goto out;
	}

	/* P_BLK addresses are expected to fit in 32 bits. */
	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);

	if (ao != NULL) {
		ao->ao_procid = obj->Processor.ProcId;
		ao->ao_pblklen = obj->Processor.PblkLength;
		ao->ao_pblkaddr = obj->Processor.PblkAddress;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
422 
423 static uint32_t
424 acpicpu_cap(struct acpicpu_softc *sc)
425 {
426 	uint32_t flags, cap = 0;
427 	ACPI_STATUS rv;
428 
429 	/*
430 	 * Query and set machine-dependent capabilities.
431 	 * Note that the Intel-specific _PDC method has
432 	 * already been evaluated. It was furthermore
433 	 * deprecated in the ACPI 3.0 in favor of _OSC.
434 	 */
435 	flags = acpi_md_pdc();
436 	rv = acpicpu_cap_osc(sc, flags, &cap);
437 
438 	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
439 
440 		aprint_error_dev(sc->sc_dev, "failed to evaluate "
441 		    "_OSC: %s\n", AcpiFormatException(rv));
442 	}
443 
444 	return (cap != 0) ? cap : flags;
445 }
446 
/*
 * Negotiate processor capabilities with the firmware via _OSC.
 * First a query pass (ACPI_OSC_QUERY set) is made, retried a bounded
 * number of times while the firmware masks capabilities; then a
 * second pass commits the capabilities. On success *val receives the
 * granted capability word.
 */
static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;	/* retry budget for the "capabilities masked" case */

	/* UUID identifying the Intel processor _OSC interface. */
	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	/* cap[0] holds _OSC control flags, cap[1] the capability bits. */
	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	/* _OSC(UUID, revision, word count, capability buffer). */
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 *  _OSC with the Query Support Flag set until _OSC
	 *  returns the Capabilities Masked bit clear, to
	 *  negotiate the set of features to be granted to
	 *  the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	/* Query pass done; clear the flag and commit the capabilities. */
	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
556 
557 static void
558 acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
559 {
560 	ACPI_OSD_EXEC_CALLBACK func;
561 	struct acpicpu_softc *sc;
562 	device_t self = aux;
563 
564 	sc = device_private(self);
565 
566 	if (sc->sc_cold != false)
567 		return;
568 
569 	if (acpicpu_dynamic != true)
570 		return;
571 
572 	switch (evt) {
573 
574 	case ACPICPU_C_NOTIFY:
575 
576 		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
577 			return;
578 
579 		func = acpicpu_cstate_callback;
580 		break;
581 
582 	case ACPICPU_P_NOTIFY:
583 
584 		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
585 			return;
586 
587 		func = acpicpu_pstate_callback;
588 		break;
589 
590 	case ACPICPU_T_NOTIFY:
591 
592 		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
593 			return;
594 
595 		func = acpicpu_tstate_callback;
596 		break;
597 
598 	default:
599 		aprint_error_dev(sc->sc_dev,  "unknown notify: 0x%02X\n", evt);
600 		return;
601 	}
602 
603 	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
604 }
605 
606 static bool
607 acpicpu_suspend(device_t self, const pmf_qual_t *qual)
608 {
609 	struct acpicpu_softc *sc = device_private(self);
610 
611 	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
612 		(void)acpicpu_cstate_suspend(self);
613 
614 	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
615 		(void)acpicpu_pstate_suspend(self);
616 
617 	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
618 		(void)acpicpu_tstate_suspend(self);
619 
620 	sc->sc_cold = true;
621 
622 	return true;
623 }
624 
625 static bool
626 acpicpu_resume(device_t self, const pmf_qual_t *qual)
627 {
628 	struct acpicpu_softc *sc = device_private(self);
629 	static const int handler = OSL_NOTIFY_HANDLER;
630 
631 	sc->sc_cold = false;
632 
633 	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
634 		(void)AcpiOsExecute(handler, acpicpu_cstate_resume, self);
635 
636 	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
637 		(void)AcpiOsExecute(handler, acpicpu_pstate_resume, self);
638 
639 	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
640 		(void)AcpiOsExecute(handler, acpicpu_tstate_resume, self);
641 
642 	return true;
643 }
644 
645 static void
646 acpicpu_evcnt_attach(device_t self)
647 {
648 	struct acpicpu_softc *sc = device_private(self);
649 	struct acpicpu_cstate *cs;
650 	struct acpicpu_pstate *ps;
651 	struct acpicpu_tstate *ts;
652 	const char *str;
653 	uint32_t i;
654 
655 	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
656 
657 		cs = &sc->sc_cstate[i];
658 
659 		if (cs->cs_method == 0)
660 			continue;
661 
662 		str = "HALT";
663 
664 		if (cs->cs_method == ACPICPU_C_STATE_FFH)
665 			str = "MWAIT";
666 
667 		if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
668 			str = "I/O";
669 
670 		(void)snprintf(cs->cs_name, sizeof(cs->cs_name),
671 		    "C%d (%s)", i, str);
672 
673 		evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
674 		    NULL, device_xname(sc->sc_dev), cs->cs_name);
675 	}
676 
677 	for (i = 0; i < sc->sc_pstate_count; i++) {
678 
679 		ps = &sc->sc_pstate[i];
680 
681 		if (ps->ps_freq == 0)
682 			continue;
683 
684 		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
685 		    "P%u (%u MHz)", i, ps->ps_freq);
686 
687 		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
688 		    NULL, device_xname(sc->sc_dev), ps->ps_name);
689 	}
690 
691 	for (i = 0; i < sc->sc_tstate_count; i++) {
692 
693 		ts = &sc->sc_tstate[i];
694 
695 		if (ts->ts_percent == 0)
696 			continue;
697 
698 		(void)snprintf(ts->ts_name, sizeof(ts->ts_name),
699 		    "T%u (%u %%)", i, ts->ts_percent);
700 
701 		evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC,
702 		    NULL, device_xname(sc->sc_dev), ts->ts_name);
703 	}
704 }
705 
706 static void
707 acpicpu_evcnt_detach(device_t self)
708 {
709 	struct acpicpu_softc *sc = device_private(self);
710 	struct acpicpu_cstate *cs;
711 	struct acpicpu_pstate *ps;
712 	struct acpicpu_tstate *ts;
713 	uint32_t i;
714 
715 	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
716 
717 		cs = &sc->sc_cstate[i];
718 
719 		if (cs->cs_method != 0)
720 			evcnt_detach(&cs->cs_evcnt);
721 	}
722 
723 	for (i = 0; i < sc->sc_pstate_count; i++) {
724 
725 		ps = &sc->sc_pstate[i];
726 
727 		if (ps->ps_freq != 0)
728 			evcnt_detach(&ps->ps_evcnt);
729 	}
730 
731 	for (i = 0; i < sc->sc_tstate_count; i++) {
732 
733 		ts = &sc->sc_tstate[i];
734 
735 		if (ts->ts_percent != 0)
736 			evcnt_detach(&ts->ts_evcnt);
737 	}
738 }
739 
740 static void
741 acpicpu_debug_print(device_t self)
742 {
743 	struct acpicpu_softc *sc = device_private(self);
744 	struct cpu_info *ci = sc->sc_ci;
745 	struct acpicpu_cstate *cs;
746 	struct acpicpu_pstate *ps;
747 	struct acpicpu_tstate *ts;
748 	static bool once = false;
749 	struct acpicpu_dep *dep;
750 	uint32_t i, method;
751 
752 	if (once != true) {
753 
754 		for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
755 
756 			cs = &sc->sc_cstate[i];
757 
758 			if (cs->cs_method == 0)
759 				continue;
760 
761 			aprint_verbose_dev(sc->sc_dev, "C%d: %3s, "
762 			    "lat %3u us, pow %5u mW%s\n", i,
763 			    acpicpu_debug_print_method_c(cs->cs_method),
764 			    cs->cs_latency, cs->cs_power,
765 			    (cs->cs_flags != 0) ? ", bus master check" : "");
766 		}
767 
768 		method = sc->sc_pstate_control.reg_spaceid;
769 
770 		for (i = 0; i < sc->sc_pstate_count; i++) {
771 
772 			ps = &sc->sc_pstate[i];
773 
774 			if (ps->ps_freq == 0)
775 				continue;
776 
777 			aprint_verbose_dev(sc->sc_dev, "P%d: %3s, "
778 			    "lat %3u us, pow %5u mW, %4u MHz%s\n", i,
779 			    acpicpu_debug_print_method_pt(method),
780 			    ps->ps_latency, ps->ps_power, ps->ps_freq,
781 			    (ps->ps_flags & ACPICPU_FLAG_P_TURBO) != 0 ?
782 			    ", turbo boost" : "");
783 		}
784 
785 		method = sc->sc_tstate_control.reg_spaceid;
786 
787 		for (i = 0; i < sc->sc_tstate_count; i++) {
788 
789 			ts = &sc->sc_tstate[i];
790 
791 			if (ts->ts_percent == 0)
792 				continue;
793 
794 			aprint_verbose_dev(sc->sc_dev, "T%u: %3s, "
795 			    "lat %3u us, pow %5u mW, %3u %%\n", i,
796 			    acpicpu_debug_print_method_pt(method),
797 			    ts->ts_latency, ts->ts_power, ts->ts_percent);
798 		}
799 
800 		once = true;
801 	}
802 
803 	aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, "
804 	    "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid,
805 	    (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags);
806 
807 	if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) {
808 
809 		dep = &sc->sc_cstate_dep;
810 
811 		aprint_debug_dev(sc->sc_dev, "C-state coordination: "
812 		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
813 		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
814 	}
815 
816 	if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) {
817 
818 		dep = &sc->sc_pstate_dep;
819 
820 		aprint_debug_dev(sc->sc_dev, "P-state coordination: "
821 		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
822 		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
823 	}
824 
825 	if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) {
826 
827 		dep = &sc->sc_tstate_dep;
828 
829 		aprint_debug_dev(sc->sc_dev, "T-state coordination: "
830 		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
831 		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
832 	}
833 }
834 
835 static const char *
836 acpicpu_debug_print_method_c(uint8_t val)
837 {
838 
839 	if (val == ACPICPU_C_STATE_FFH)
840 		return "FFH";
841 
842 	if (val == ACPICPU_C_STATE_HALT)
843 		return "HLT";
844 
845 	if (val == ACPICPU_C_STATE_SYSIO)
846 		return "I/O";
847 
848 	return "???";
849 }
850 
851 static const char *
852 acpicpu_debug_print_method_pt(uint8_t val)
853 {
854 
855 	if (val == ACPI_ADR_SPACE_SYSTEM_IO)
856 		return "I/O";
857 
858 	if (val == ACPI_ADR_SPACE_FIXED_HARDWARE)
859 		return "FFH";
860 
861 	return "???";
862 }
863 
864 static const char *
865 acpicpu_debug_print_dep(uint32_t val)
866 {
867 
868 	switch (val) {
869 
870 	case ACPICPU_DEP_SW_ALL:
871 		return "SW_ALL";
872 
873 	case ACPICPU_DEP_SW_ANY:
874 		return "SW_ANY";
875 
876 	case ACPICPU_DEP_HW_ALL:
877 		return "HW_ALL";
878 
879 	default:
880 		return "unknown";
881 	}
882 }
883 
884 MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);
885 
886 #ifdef _MODULE
887 #include "ioconf.c"
888 #endif
889 
890 static int
891 acpicpu_modcmd(modcmd_t cmd, void *aux)
892 {
893 	int rv = 0;
894 
895 	switch (cmd) {
896 
897 	case MODULE_CMD_INIT:
898 
899 #ifdef _MODULE
900 		rv = config_init_component(cfdriver_ioconf_acpicpu,
901 		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
902 #endif
903 		break;
904 
905 	case MODULE_CMD_FINI:
906 
907 #ifdef _MODULE
908 		rv = config_fini_component(cfdriver_ioconf_acpicpu,
909 		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
910 #endif
911 		break;
912 
913 	default:
914 		rv = ENOTTY;
915 	}
916 
917 	return rv;
918 }
919