/* $NetBSD: acpi_cpu.c,v 1.49 2012/03/27 18:37:57 jruoho Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.49 2012/03/27 18:37:57 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/cpufreq.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#include <machine/acpi_machdep.h>
#include <machine/cpuvar.h>

#define _COMPONENT	  ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	  ("acpi_cpu")

static int		  acpicpu_match(device_t, cfdata_t, void *);
static void		  acpicpu_attach(device_t, device_t, void *);
static int		  acpicpu_detach(device_t, int);
static int		  acpicpu_once_attach(void);
static int		  acpicpu_once_detach(void);
static void		  acpicpu_start(device_t);
static void		  acpicpu_sysctl(device_t);

static ACPI_STATUS	  acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static uint32_t		  acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	  acpicpu_cap_osc(struct acpicpu_softc *,
					  uint32_t, uint32_t *);
static void		  acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		  acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		  acpicpu_resume(device_t, const pmf_qual_t *);
static void		  acpicpu_evcnt_attach(device_t);
static void		  acpicpu_evcnt_detach(device_t);
static void		  acpicpu_debug_print(device_t);
static const char	 *acpicpu_debug_print_method_c(uint8_t);
static const char	 *acpicpu_debug_print_method_pt(uint8_t);
static const char	 *acpicpu_debug_print_dep(uint32_t);

static uint32_t		  acpicpu_count = 0;
struct acpicpu_softc	**acpicpu_sc = NULL;
static struct sysctllog	 *acpicpu_log = NULL;
static bool		  acpicpu_dynamic = true;
static bool		  acpicpu_passive = true;

static const struct {
	const char	 *manu;
	const char	 *prod;
	const char	 *vers;
} acpicpu_quirks[] = {
	{ "Supermicro", "PDSMi-LN4", "0123456789" },
	{ "ASUSTeK Computer INC.", "M2A-MX", "Rev 1.xx" },
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);

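/*
 * Autoconfiguration match: reject boards listed in the quirk table
 * above, and require that both the machine-dependent match routine
 * and the ACPI namespace recognize this CPU.
 */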
static int
acpicpu_match(device_t parent, cfdata_t match, void *aux)
{
	const char *manu, *prod, *vers;
	struct cpu_info *ci;
	size_t i;

	if (acpi_softc == NULL)
		return 0;

	manu = pmf_get_platform("board-vendor");
	prod = pmf_get_platform("board-product");
	vers = pmf_get_platform("board-version");

	if (manu != NULL && prod != NULL && vers != NULL) {

		for (i = 0; i < __arraycount(acpicpu_quirks); i++) {

			if (strcasecmp(acpicpu_quirks[i].manu, manu) == 0 &&
			    strcasecmp(acpicpu_quirks[i].prod, prod) == 0 &&
			    strcasecmp(acpicpu_quirks[i].vers, vers) == 0)
				return 0;
		}
	}

	ci = acpicpu_md_match(parent, match, aux);

	if (ci == NULL)
		return 0;

	if (acpi_match_cpu_info(ci) == NULL)
		return 0;

	return 10;
}

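/*
 * Attach: bind the softc to the cpu_info and the matching ACPI node,
 * evaluate the Processor object, probe C-, P- and T-state support,
 * and defer the rest of the initialization to config_interrupts(9).
 */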
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci;
	ACPI_HANDLE hdl;
	cpuid_t id;
	int rv;

	ci = acpicpu_md_attach(parent, self, aux);

	if (ci == NULL)
		return;

	sc->sc_ci = ci;
	sc->sc_dev = self;
	sc->sc_cold = true;

	hdl = acpi_match_cpu_info(ci);

	if (hdl == NULL) {
		aprint_normal(": failed to match processor\n");
		return;
	}

	sc->sc_node = acpi_match_node(hdl);

	if (acpicpu_once_attach() != 0) {
		aprint_normal(": failed to initialize\n");
		return;
	}

	KASSERT(acpi_softc != NULL);
	KASSERT(acpicpu_sc != NULL);
	KASSERT(sc->sc_node != NULL);

	id = sc->sc_ci->ci_acpiid;

	if (acpicpu_sc[id] != NULL) {
		aprint_normal(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);

	if (ACPI_FAILURE(rv))
		aprint_verbose_dev(self, "failed to obtain CPU object\n");

	acpicpu_count++;
	acpicpu_sc[id] = sc;

	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_ncpus = acpi_md_ncpus();
	sc->sc_flags = acpicpu_md_flags();

	KASSERT(acpicpu_count <= sc->sc_ncpus);
	KASSERT(sc->sc_node->ad_device == NULL);

	sc->sc_node->ad_device = self;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	acpicpu_debug_print(self);
	acpicpu_evcnt_attach(self);

	(void)config_interrupts(self, acpicpu_start);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}

static int
acpicpu_detach(device_t self, int flags)
{
	struct acpicpu_softc *sc = device_private(self);

	sc->sc_cold = true;

	acpicpu_evcnt_detach(self);
	acpi_deregister_notify(sc->sc_node);

	acpicpu_cstate_detach(self);
	acpicpu_pstate_detach(self);
	acpicpu_tstate_detach(self);

	mutex_destroy(&sc->sc_mtx);
	sc->sc_node->ad_device = NULL;

	acpicpu_count--;
	acpicpu_once_detach();

	return 0;
}

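/*
 * One-time initialization: the first instance to attach allocates the
 * global array that maps ACPI CPU ids to softc pointers.
 */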
static int
acpicpu_once_attach(void)
{
	struct acpicpu_softc *sc;
	unsigned int i;

	if (acpicpu_count != 0)
		return 0;

	KASSERT(acpicpu_sc == NULL);
	KASSERT(acpicpu_log == NULL);

	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);

	if (acpicpu_sc == NULL)
		return ENOMEM;

	for (i = 0; i < maxcpus; i++)
		acpicpu_sc[i] = NULL;

	return 0;
}

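/*
 * One-time cleanup: release the global state once the last instance
 * has detached.
 */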
static int
acpicpu_once_detach(void)
{
	struct acpicpu_softc *sc;

	if (acpicpu_count != 0)
		return EDEADLK;

	cpufreq_deregister();

	if (acpicpu_log != NULL)
		sysctl_teardown(&acpicpu_log);

	if (acpicpu_sc != NULL)
		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));

	return 0;
}

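/*
 * Deferred start, scheduled via config_interrupts(9). The last CPU to
 * reach this point enables the C-, P- and T-state machinery, creates
 * the sysctl tree, and registers the driver with cpufreq(9).
 */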
static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static uint32_t count = 0;
	struct cpufreq cf;
	uint32_t i;

	/*
	 * Run the state-specific initialization routines. These
	 * must run only once, after interrupts have been enabled,
	 * all CPUs are running, and all ACPI CPUs have attached.
	 */
	if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) {
		sc->sc_cold = false;
		return;
	}

	/*
	 * Set the last ACPI CPU as non-cold
	 * only after C-states are enabled.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	acpicpu_sysctl(self);
	aprint_debug_dev(self, "ACPI CPUs started\n");

	/*
	 * Register with cpufreq(9).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0) {

		(void)memset(&cf, 0, sizeof(struct cpufreq));

		cf.cf_mp = false;
		cf.cf_cookie = NULL;
		cf.cf_get_freq = acpicpu_pstate_get;
		cf.cf_set_freq = acpicpu_pstate_set;
		cf.cf_state_count = sc->sc_pstate_count;

		(void)strlcpy(cf.cf_name, "acpicpu", sizeof(cf.cf_name));

		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			cf.cf_state[i].cfs_freq = sc->sc_pstate[i].ps_freq;
			cf.cf_state[i].cfs_power = sc->sc_pstate[i].ps_power;
		}

		if (cpufreq_register(&cf) != 0)
			aprint_error_dev(self, "failed to register cpufreq\n");
	}
}

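/*
 * Create the hw.acpi.cpu sysctl subtree with the "dynamic" and
 * "passive" knobs.
 */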
static void
acpicpu_sysctl(device_t self)
{
	const struct sysctlnode *node;
	int err;

	KASSERT(acpicpu_log == NULL);

	err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	return;

fail:
	aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
}

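/*
 * Evaluate the handle as an ACPI Processor object and record the
 * processor ID and the P_BLK address and length.
 */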
static ACPI_STATUS
acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
{
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	rv = acpi_eval_struct(hdl, NULL, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PROCESSOR) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
		rv = AE_LIMIT;
		goto out;
	}

	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);

	if (ao != NULL) {
		ao->ao_procid = obj->Processor.ProcId;
		ao->ao_pblklen = obj->Processor.PblkLength;
		ao->ao_pblkaddr = obj->Processor.PblkAddress;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

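/*
 * Negotiate processor capabilities with the firmware. If _OSC is
 * absent or grants nothing, fall back to the _PDC flags.
 */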
static uint32_t
acpicpu_cap(struct acpicpu_softc *sc)
{
	uint32_t flags, cap = 0;
	ACPI_STATUS rv;

	/*
	 * Query and set machine-dependent capabilities.
	 * Note that the Intel-specific _PDC method has
	 * already been evaluated; it was furthermore
	 * deprecated in ACPI 3.0 in favor of _OSC.
	 */
	flags = acpi_md_pdc();
	rv = acpicpu_cap_osc(sc, flags, &cap);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {

		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "_OSC: %s\n", AcpiFormatException(rv));
	}

	return (cap != 0) ? cap : flags;
}

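/*
 * Evaluate _OSC with the Intel processor vendor UUID, first with the
 * query flag set and then again to commit the negotiated capabilities.
 */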
static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;

	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 *  _OSC with the Query Support Flag set until _OSC
	 *  returns the Capabilities Masked bit clear, to
	 *  negotiate the set of features to be granted to
	 *  the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on a per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

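/*
 * ACPI notify handler. Unless the CPU is still cold or dynamic state
 * changes have been disabled, dispatch the matching state callback
 * through AcpiOsExecute().
 */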
static void
acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if (sc->sc_cold != false)
		return;

	if (acpicpu_dynamic != true)
		return;

	switch (evt) {

	case ACPICPU_C_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
			return;

		func = acpicpu_cstate_callback;
		break;

	case ACPICPU_P_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
			return;

		func = acpicpu_pstate_callback;
		break;

	case ACPICPU_T_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
			return;

		func = acpicpu_tstate_callback;
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
		return;
	}

	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
}

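/*
 * pmf(9) suspend and resume hooks for the per-CPU power states.
 */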
static bool
acpicpu_suspend(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_suspend(self);

	sc->sc_cold = true;

	return true;
}

static bool
acpicpu_resume(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);
	static const int handler = OSL_NOTIFY_HANDLER;

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)AcpiOsExecute(handler, acpicpu_cstate_resume, self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)AcpiOsExecute(handler, acpicpu_pstate_resume, self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)AcpiOsExecute(handler, acpicpu_tstate_resume, self);

	return true;
}

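/*
 * Attach one event counter per usable C-, P- and T-state.
 */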
static void
acpicpu_evcnt_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	const char *str;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method == 0)
			continue;

		str = "HALT";

		if (cs->cs_method == ACPICPU_C_STATE_FFH)
			str = "MWAIT";

		if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
			str = "I/O";

		(void)snprintf(cs->cs_name, sizeof(cs->cs_name),
		    "C%d (%s)", i, str);

		evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), cs->cs_name);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
		    "P%u (%u MHz)", i, ps->ps_freq);

		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ps->ps_name);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent == 0)
			continue;

		(void)snprintf(ts->ts_name, sizeof(ts->ts_name),
		    "T%u (%u %%)", i, ts->ts_percent);

		evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ts->ts_name);
	}
}

static void
acpicpu_evcnt_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method != 0)
			evcnt_detach(&cs->cs_evcnt);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0)
			evcnt_detach(&ps->ps_evcnt);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent != 0)
			evcnt_detach(&ts->ts_evcnt);
	}
}

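/*
 * Print the available states once, followed by the per-CPU capability
 * flags and any C-, P- or T-state coordination dependencies.
 */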
static void
acpicpu_debug_print(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci = sc->sc_ci;
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	static bool once = false;
	struct acpicpu_dep *dep;
	uint32_t i, method;

	if (once != true) {

		for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

			cs = &sc->sc_cstate[i];

			if (cs->cs_method == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "C%d: %3s, "
			    "lat %3u us, pow %5u mW%s\n", i,
			    acpicpu_debug_print_method_c(cs->cs_method),
			    cs->cs_latency, cs->cs_power,
			    (cs->cs_flags != 0) ? ", bus master check" : "");
		}

		method = sc->sc_pstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_pstate_count; i++) {

			ps = &sc->sc_pstate[i];

			if (ps->ps_freq == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "P%d: %3s, "
			    "lat %3u us, pow %5u mW, %4u MHz%s\n", i,
			    acpicpu_debug_print_method_pt(method),
			    ps->ps_latency, ps->ps_power, ps->ps_freq,
			    (ps->ps_flags & ACPICPU_FLAG_P_TURBO) != 0 ?
			    ", turbo boost" : "");
		}

		method = sc->sc_tstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_tstate_count; i++) {

			ts = &sc->sc_tstate[i];

			if (ts->ts_percent == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "T%u: %3s, "
			    "lat %3u us, pow %5u mW, %3u %%\n", i,
			    acpicpu_debug_print_method_pt(method),
			    ts->ts_latency, ts->ts_power, ts->ts_percent);
		}

		once = true;
	}

	aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, "
	    "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid,
	    (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags);

	if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) {

		dep = &sc->sc_cstate_dep;

		aprint_debug_dev(sc->sc_dev, "C-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) {

		dep = &sc->sc_pstate_dep;

		aprint_debug_dev(sc->sc_dev, "P-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) {

		dep = &sc->sc_tstate_dep;

		aprint_debug_dev(sc->sc_dev, "T-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}
}

static const char *
acpicpu_debug_print_method_c(uint8_t val)
{

	if (val == ACPICPU_C_STATE_FFH)
		return "FFH";

	if (val == ACPICPU_C_STATE_HALT)
		return "HLT";

	if (val == ACPICPU_C_STATE_SYSIO)
		return "I/O";

	return "???";
}

static const char *
acpicpu_debug_print_method_pt(uint8_t val)
{

	if (val == ACPI_ADR_SPACE_SYSTEM_IO)
		return "I/O";

	if (val == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return "FFH";

	return "???";
}

static const char *
acpicpu_debug_print_dep(uint32_t val)
{

	switch (val) {

	case ACPICPU_DEP_SW_ALL:
		return "SW_ALL";

	case ACPICPU_DEP_SW_ANY:
		return "SW_ANY";

	case ACPICPU_DEP_HW_ALL:
		return "HW_ALL";

	default:
		return "unknown";
	}
}

MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

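/*
 * module(9) command handler: initialize or tear down the
 * autoconfiguration glue when built as a loadable module.
 */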
static int
acpicpu_modcmd(modcmd_t cmd, void *aux)
{
	int rv = 0;

	switch (cmd) {

	case MODULE_CMD_INIT:

#ifdef _MODULE
		rv = config_init_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	case MODULE_CMD_FINI:

#ifdef _MODULE
		rv = config_fini_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	default:
		rv = ENOTTY;
	}

	return rv;
}