xref: /netbsd-src/sys/dev/acpi/acpi_cpu.c (revision 46f5119e40af2e51998f686b2fdcc76b5488f7f3)
1 /* $NetBSD: acpi_cpu.c,v 1.40 2011/04/25 05:30:21 jruoho Exp $ */
2 
3 /*-
4  * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.40 2011/04/25 05:30:21 jruoho Exp $");
31 
32 #include <sys/param.h>
33 #include <sys/cpu.h>
34 #include <sys/evcnt.h>
35 #include <sys/kernel.h>
36 #include <sys/kmem.h>
37 #include <sys/module.h>
38 #include <sys/mutex.h>
39 #include <sys/sysctl.h>
40 
41 #include <dev/acpi/acpireg.h>
42 #include <dev/acpi/acpivar.h>
43 #include <dev/acpi/acpi_cpu.h>
44 
45 #include <machine/acpi_machdep.h>
46 #include <machine/cpuvar.h>
47 
48 #define _COMPONENT	  ACPI_BUS_COMPONENT
49 ACPI_MODULE_NAME	  ("acpi_cpu")
50 
51 static int		  acpicpu_match(device_t, cfdata_t, void *);
52 static void		  acpicpu_attach(device_t, device_t, void *);
53 static int		  acpicpu_detach(device_t, int);
54 static int		  acpicpu_once_attach(void);
55 static int		  acpicpu_once_detach(void);
56 static void		  acpicpu_start(device_t);
57 static void		  acpicpu_sysctl(device_t);
58 
59 static ACPI_STATUS	  acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
60 static int		  acpicpu_find(struct cpu_info *,
61 				       struct acpi_devnode **);
62 static uint32_t		  acpicpu_cap(struct acpicpu_softc *);
63 static ACPI_STATUS	  acpicpu_cap_pdc(struct acpicpu_softc *, uint32_t);
64 static ACPI_STATUS	  acpicpu_cap_osc(struct acpicpu_softc *,
65 					  uint32_t, uint32_t *);
66 static void		  acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
67 static bool		  acpicpu_suspend(device_t, const pmf_qual_t *);
68 static bool		  acpicpu_resume(device_t, const pmf_qual_t *);
69 static void		  acpicpu_evcnt_attach(device_t);
70 static void		  acpicpu_evcnt_detach(device_t);
71 static void		  acpicpu_debug_print(device_t);
72 static const char	 *acpicpu_debug_print_method(uint8_t);
73 static const char	 *acpicpu_debug_print_dep(uint32_t);
74 
75 static uint32_t		  acpicpu_count = 0;
76 struct acpicpu_softc	**acpicpu_sc = NULL;
77 static struct sysctllog	 *acpicpu_log = NULL;
78 static bool		  acpicpu_dynamic = true;
79 static bool		  acpicpu_passive = true;
80 
81 static const struct {
82 	const char	 *manu;
83 	const char	 *prod;
84 	const char	 *vers;
85 } acpicpu_quirks[] = {
86 	{ "Supermicro", "PDSMi-LN4", "0123456789" },
87 };
88 
89 static const char * const acpicpu_hid[] = {
90 	"ACPI0007",
91 	NULL
92 };
93 
94 CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
95     acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);
96 
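/*
 * Autoconf(9) match. Systems with known-broken firmware are
 * rejected first based on the DMI quirk table, after which the
 * decision is delegated to the machine-dependent match and to
 * the lookup of the processor from the ACPI namespace.
 */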
97 static int
98 acpicpu_match(device_t parent, cfdata_t match, void *aux)
99 {
100 	const char *manu, *prod, *vers;
101 	struct cpu_info *ci;
102 	size_t i;
103 
104 	if (acpi_softc == NULL)
105 		return 0;
106 
107 	manu = pmf_get_platform("system-manufacturer");
108 	prod = pmf_get_platform("system-product-name");
109 	vers = pmf_get_platform("system-version");
110 
111 	if (manu != NULL && prod != NULL && vers != NULL) {
112 
113 		for (i = 0; i < __arraycount(acpicpu_quirks); i++) {
114 
115 			if (strcasecmp(acpicpu_quirks[i].manu, manu) == 0 &&
116 			    strcasecmp(acpicpu_quirks[i].prod, prod) == 0 &&
117 			    strcasecmp(acpicpu_quirks[i].vers, vers) == 0)
118 				return 0;
119 		}
120 	}
121 
122 	ci = acpicpu_md_match(parent, match, aux);
123 
124 	if (ci == NULL)
125 		return 0;
126 
127 	return acpicpu_find(ci, NULL);
128 }
129 
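/*
 * Attach the device. The softc is linked to the matching node in
 * the ACPI namespace, the capabilities are negotiated, and the C-,
 * P-, and T-state code is attached. The state-specific start-up is
 * deferred until interrupts are enabled (see acpicpu_start()).
 */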
130 static void
131 acpicpu_attach(device_t parent, device_t self, void *aux)
132 {
133 	struct acpicpu_softc *sc = device_private(self);
134 	struct cpu_info *ci;
135 	cpuid_t id;
136 	int rv;
137 
138 	ci = acpicpu_md_attach(parent, self, aux);
139 
140 	if (ci == NULL)
141 		return;
142 
143 	sc->sc_ci = ci;
144 	sc->sc_dev = self;
145 	sc->sc_cold = true;
146 	sc->sc_node = NULL;
147 
148 	rv = acpicpu_find(ci, &sc->sc_node);
149 
150 	if (rv == 0) {
151 		aprint_normal(": failed to match processor\n");
152 		return;
153 	}
154 
155 	if (acpicpu_once_attach() != 0) {
156 		aprint_normal(": failed to initialize\n");
157 		return;
158 	}
159 
160 	KASSERT(acpi_softc != NULL);
161 	KASSERT(acpicpu_sc != NULL);
162 	KASSERT(sc->sc_node != NULL);
163 
164 	id = sc->sc_ci->ci_acpiid;
165 
166 	if (acpicpu_sc[id] != NULL) {
167 		aprint_normal(": already attached\n");
168 		return;
169 	}
170 
171 	aprint_naive("\n");
172 	aprint_normal(": ACPI CPU\n");
173 
174 	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);
175 
176 	if (ACPI_FAILURE(rv))
177 		aprint_verbose_dev(self, "failed to obtain CPU object\n");
178 
179 	acpicpu_count++;
180 	acpicpu_sc[id] = sc;
181 
182 	sc->sc_cap = acpicpu_cap(sc);
183 	sc->sc_ncpus = acpi_md_ncpus();
184 	sc->sc_flags = acpicpu_md_flags();
185 
186 	KASSERT(acpicpu_count <= sc->sc_ncpus);
187 	KASSERT(sc->sc_node->ad_device == NULL);
188 
189 	sc->sc_node->ad_device = self;
190 	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);
191 
192 	acpicpu_cstate_attach(self);
193 	acpicpu_pstate_attach(self);
194 	acpicpu_tstate_attach(self);
195 
196 	acpicpu_debug_print(self);
197 	acpicpu_evcnt_attach(self);
198 
199 	(void)config_interrupts(self, acpicpu_start);
200 	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
201 	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
202 }
203 
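/*
 * Detach the device. This fails if any of the state-specific
 * detach routines fails; the global resources are released by
 * acpicpu_once_detach() only once the last ACPI CPU has gone.
 */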
204 static int
205 acpicpu_detach(device_t self, int flags)
206 {
207 	struct acpicpu_softc *sc = device_private(self);
208 	int rv = 0;
209 
210 	sc->sc_cold = true;
211 
212 	acpicpu_evcnt_detach(self);
213 	acpi_deregister_notify(sc->sc_node);
214 
215 	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
216 		rv = acpicpu_cstate_detach(self);
217 
218 	if (rv != 0)
219 		return rv;
220 
221 	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
222 		rv = acpicpu_pstate_detach(self);
223 
224 	if (rv != 0)
225 		return rv;
226 
227 	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
228 		rv = acpicpu_tstate_detach(self);
229 
230 	if (rv != 0)
231 		return rv;
232 
233 	mutex_destroy(&sc->sc_mtx);
234 
235 	sc->sc_node->ad_device = NULL;
236 
237 	acpicpu_count--;
238 	acpicpu_once_detach();
239 
240 	return 0;
241 }
242 
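/*
 * Allocate the global array of softc pointers, indexed by the
 * ACPI processor ID, when the first device attaches. The array
 * is released in acpicpu_once_detach() after the last device
 * has detached.
 */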
243 static int
244 acpicpu_once_attach(void)
245 {
246 	struct acpicpu_softc *sc;
247 	unsigned int i;
248 
249 	if (acpicpu_count != 0)
250 		return 0;
251 
252 	KASSERT(acpicpu_sc == NULL);
253 	KASSERT(acpicpu_log == NULL);
254 
255 	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);
256 
257 	if (acpicpu_sc == NULL)
258 		return ENOMEM;
259 
260 	for (i = 0; i < maxcpus; i++)
261 		acpicpu_sc[i] = NULL;
262 
263 	return 0;
264 }
265 
266 static int
267 acpicpu_once_detach(void)
268 {
269 	struct acpicpu_softc *sc;
270 
271 	if (acpicpu_count != 0)
272 		return EDEADLK;
273 
274 	if (acpicpu_log != NULL)
275 		sysctl_teardown(&acpicpu_log);
276 
277 	if (acpicpu_sc != NULL)
278 		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));
279 
280 	return 0;
281 }
282 
283 static void
284 acpicpu_start(device_t self)
285 {
286 	struct acpicpu_softc *sc = device_private(self);
287 	static uint32_t count = 0;
288 
289 	/*
290 	 * Run the state-specific initialization routines. These
291 	 * must run only once, after interrupts have been enabled,
292 	 * all CPUs are running, and all ACPI CPUs have attached.
293 	 */
294 	if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) {
295 		sc->sc_cold = false;
296 		return;
297 	}
298 
299 	/*
300 	 * Set the last ACPI CPU as non-cold
301 	 * only after C-states are enabled.
302 	 */
303 	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
304 		acpicpu_cstate_start(self);
305 
306 	sc->sc_cold = false;
307 
308 	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
309 		acpicpu_pstate_start(self);
310 
311 	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
312 		acpicpu_tstate_start(self);
313 
314 	acpicpu_sysctl(self);
315 	aprint_debug_dev(self, "ACPI CPUs started\n");
316 }
317 
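/*
 * Create the sysctl(9) tree for the driver. Two writable booleans
 * are exported, typically appearing as hw.acpi.cpu.dynamic and
 * hw.acpi.cpu.passive; for example, dynamic state changes requested
 * by the firmware can be ignored with "sysctl -w hw.acpi.cpu.dynamic=0".
 */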
318 static void
319 acpicpu_sysctl(device_t self)
320 {
321 	const struct sysctlnode *node;
322 	int err;
323 
324 	KASSERT(acpicpu_log == NULL);
325 
326 	err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
327 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
328 	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL);
329 
330 	if (err != 0)
331 		goto fail;
332 
333 	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
334 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
335 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
336 
337 	if (err != 0)
338 		goto fail;
339 
340 	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
341 	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
342 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
343 
344 	if (err != 0)
345 		goto fail;
346 
347 	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
348 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
349 	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
350 	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);
351 
352 	if (err != 0)
353 		goto fail;
354 
355 	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
356 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
357 	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
358 	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);
359 
360 	if (err != 0)
361 		goto fail;
362 
363 	return;
364 
365 fail:
366 	aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
367 }
368 
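/*
 * Evaluate the handle as a Processor() object and extract the
 * ACPI processor ID and the optional P_BLK address and length.
 */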
369 static ACPI_STATUS
370 acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
371 {
372 	ACPI_OBJECT *obj;
373 	ACPI_BUFFER buf;
374 	ACPI_STATUS rv;
375 
376 	rv = acpi_eval_struct(hdl, NULL, &buf);
377 
378 	if (ACPI_FAILURE(rv))
379 		goto out;
380 
381 	obj = buf.Pointer;
382 
383 	if (obj->Type != ACPI_TYPE_PROCESSOR) {
384 		rv = AE_TYPE;
385 		goto out;
386 	}
387 
388 	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
389 		rv = AE_LIMIT;
390 		goto out;
391 	}
392 
393 	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);
394 
395 	if (ao != NULL) {
396 		ao->ao_procid = obj->Processor.ProcId;
397 		ao->ao_pblklen = obj->Processor.PblkLength;
398 		ao->ao_pblkaddr = obj->Processor.PblkAddress;
399 	}
400 
401 out:
402 	if (buf.Pointer != NULL)
403 		ACPI_FREE(buf.Pointer);
404 
405 	return rv;
406 }
407 
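/*
 * Find the ACPI device node that corresponds to the given CPU.
 * Returns a non-zero match value on success and, if requested,
 * a pointer to the node via the second argument.
 */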
408 static int
409 acpicpu_find(struct cpu_info *ci, struct acpi_devnode **ptr)
410 {
411 	struct acpi_softc *sc = acpi_softc;
412 	struct acpicpu_object ao;
413 	struct acpi_devnode *ad;
414 	ACPI_INTEGER val;
415 	ACPI_STATUS rv;
416 
417 	if (sc == NULL || acpi_active == 0)
418 		return 0;
419 
420 	/*
421 	 * CPUs are declared in the ACPI namespace
422 	 * either as a Processor() or as a Device().
423 	 * In both cases the MADT entries are used
424 	 * for the match (see ACPI 4.0, section 8.4).
425 	 */
426 	SIMPLEQ_FOREACH(ad, &sc->ad_head, ad_list) {
427 
428 		if (ad->ad_type == ACPI_TYPE_PROCESSOR) {
429 
430 			rv = acpicpu_object(ad->ad_handle, &ao);
431 
432 			if (ACPI_SUCCESS(rv) && ci->ci_acpiid == ao.ao_procid)
433 				goto out;
434 		}
435 
436 		if (acpi_match_hid(ad->ad_devinfo, acpicpu_hid) != 0) {
437 
438 			rv = acpi_eval_integer(ad->ad_handle, "_UID", &val);
439 
440 			if (ACPI_SUCCESS(rv) && ci->ci_acpiid == val)
441 				goto out;
442 		}
443 	}
444 
445 	return 0;
446 
447 out:
448 	if (ptr != NULL)
449 		*ptr = ad;
450 
451 	return 10;
452 }
453 
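/*
 * Negotiate the driver capabilities with the firmware. Both the
 * _OSC and the legacy _PDC methods are evaluated; their absence
 * (AE_NOT_FOUND) is not an error, as the methods are optional.
 * If _OSC grants nothing, the machine-dependent flags are used
 * as such.
 */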
454 static uint32_t
455 acpicpu_cap(struct acpicpu_softc *sc)
456 {
457 	uint32_t flags, cap = 0;
458 	const char *str;
459 	ACPI_STATUS rv;
460 
461 	/*
462 	 * Query and set machine-dependent capabilities.
463 	 * Note that the Intel-specific _PDC method was
464 	 * deprecated in ACPI 3.0 in favor of _OSC.
465 	 */
466 	flags = acpicpu_md_cap();
467 	rv = acpicpu_cap_osc(sc, flags, &cap);
468 
469 	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
470 		str = "_OSC";
471 		goto fail;
472 	}
473 
474 	rv = acpicpu_cap_pdc(sc, flags);
475 
476 	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
477 		str = "_PDC";
478 		goto fail;
479 	}
480 
481 	if (cap == 0)
482 		cap = flags;
483 
484 	return cap;
485 
486 fail:
487 	aprint_error_dev(sc->sc_dev, "failed to evaluate "
488 	    "%s: %s\n", str, AcpiFormatException(rv));
489 
490 	return 0;
491 }
492 
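/*
 * Evaluate _PDC. The method takes a single buffer argument that
 * consists of three 32-bit integers: the revision, the number of
 * capability words that follow (here one), and the capability
 * bits themselves.
 */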
493 static ACPI_STATUS
494 acpicpu_cap_pdc(struct acpicpu_softc *sc, uint32_t flags)
495 {
496 	ACPI_OBJECT_LIST arg;
497 	ACPI_OBJECT obj;
498 	uint32_t cap[3];
499 
500 	arg.Count = 1;
501 	arg.Pointer = &obj;
502 
503 	cap[0] = ACPICPU_PDC_REVID;
504 	cap[1] = 1;
505 	cap[2] = flags;
506 
507 	obj.Type = ACPI_TYPE_BUFFER;
508 	obj.Buffer.Length = sizeof(cap);
509 	obj.Buffer.Pointer = (void *)cap;
510 
511 	return AcpiEvaluateObject(sc->sc_node->ad_handle, "_PDC", &arg, NULL);
512 }
513 
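/*
 * Evaluate _OSC. The method takes four arguments: the Intel-defined
 * processor UUID, the revision, the number of 32-bit capability
 * words, and the capability buffer itself. Per the recommendation
 * quoted below, the method is first evaluated with the query bit
 * set, and only then with the desired capabilities.
 */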
514 static ACPI_STATUS
515 acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
516 {
517 	ACPI_OBJECT_LIST arg;
518 	ACPI_OBJECT obj[4];
519 	ACPI_OBJECT *osc;
520 	ACPI_BUFFER buf;
521 	ACPI_STATUS rv;
522 	uint32_t cap[2];
523 	uint32_t *ptr;
524 	int i = 5;
525 
526 	static uint8_t intel_uuid[16] = {
527 		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
528 		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
529 	};
530 
531 	cap[0] = ACPI_OSC_QUERY;
532 	cap[1] = flags;
533 
534 again:
535 	arg.Count = 4;
536 	arg.Pointer = obj;
537 
538 	obj[0].Type = ACPI_TYPE_BUFFER;
539 	obj[0].Buffer.Length = sizeof(intel_uuid);
540 	obj[0].Buffer.Pointer = intel_uuid;
541 
542 	obj[1].Type = ACPI_TYPE_INTEGER;
543 	obj[1].Integer.Value = ACPICPU_PDC_REVID;
544 
545 	obj[2].Type = ACPI_TYPE_INTEGER;
546 	obj[2].Integer.Value = __arraycount(cap);
547 
548 	obj[3].Type = ACPI_TYPE_BUFFER;
549 	obj[3].Buffer.Length = sizeof(cap);
550 	obj[3].Buffer.Pointer = (void *)cap;
551 
552 	buf.Pointer = NULL;
553 	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;
554 
555 	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);
556 
557 	if (ACPI_FAILURE(rv))
558 		goto out;
559 
560 	osc = buf.Pointer;
561 
562 	if (osc->Type != ACPI_TYPE_BUFFER) {
563 		rv = AE_TYPE;
564 		goto out;
565 	}
566 
567 	if (osc->Buffer.Length != sizeof(cap)) {
568 		rv = AE_BUFFER_OVERFLOW;
569 		goto out;
570 	}
571 
572 	ptr = (uint32_t *)osc->Buffer.Pointer;
573 
574 	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
575 		rv = AE_ERROR;
576 		goto out;
577 	}
578 
579 	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
580 		rv = AE_BAD_PARAMETER;
581 		goto out;
582 	}
583 
584 	/*
585 	 * "It is strongly recommended that the OS evaluate
586 	 *  _OSC with the Query Support Flag set until _OSC
587 	 *  returns the Capabilities Masked bit clear, to
588 	 *  negotiate the set of features to be granted to
589 	 *  the OS for native support (ACPI 4.0, 6.2.10)."
590 	 */
591 	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {
592 
593 		ACPI_FREE(buf.Pointer);
594 		i--;
595 
596 		goto again;
597 	}
598 
599 	if ((cap[0] & ACPI_OSC_QUERY) != 0) {
600 
601 		ACPI_FREE(buf.Pointer);
602 		cap[0] &= ~ACPI_OSC_QUERY;
603 
604 		goto again;
605 	}
606 
607 	/*
608 	 * It is permitted for _OSC to return all
609 	 * bits cleared, but this is specified to
610 	 * vary on a per-device basis. Assume that
611 	 * everything rather than nothing will be
612 	 * supported in this case; we do not need
613 	 * the firmware to know the CPU features.
614 	 */
615 	*val = (ptr[1] != 0) ? ptr[1] : cap[1];
616 
617 out:
618 	if (buf.Pointer != NULL)
619 		ACPI_FREE(buf.Pointer);
620 
621 	return rv;
622 }
623 
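/*
 * Handle ACPI notifications. The actual work is dispatched to the
 * state-specific callbacks via AcpiOsExecute(), keeping the notify
 * handler itself short. Notifications are ignored while the device
 * is cold or when dynamic state changes have been disabled.
 */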
624 static void
625 acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
626 {
627 	ACPI_OSD_EXEC_CALLBACK func;
628 	struct acpicpu_softc *sc;
629 	device_t self = aux;
630 
631 	sc = device_private(self);
632 
633 	if (sc->sc_cold != false)
634 		return;
635 
636 	if (acpicpu_dynamic != true)
637 		return;
638 
639 	switch (evt) {
640 
641 	case ACPICPU_C_NOTIFY:
642 
643 		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
644 			return;
645 
646 		func = acpicpu_cstate_callback;
647 		break;
648 
649 	case ACPICPU_P_NOTIFY:
650 
651 		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
652 			return;
653 
654 		func = acpicpu_pstate_callback;
655 		break;
656 
657 	case ACPICPU_T_NOTIFY:
658 
659 		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
660 			return;
661 
662 		func = acpicpu_tstate_callback;
663 		break;
664 
665 	default:
666 		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
667 		return;
668 	}
669 
670 	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
671 }
672 
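/*
 * The pmf(9) suspend and resume hooks. The states are suspended
 * synchronously, whereas on resume the work is queued to the
 * ACPICA task queue with AcpiOsExecute().
 */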
673 static bool
674 acpicpu_suspend(device_t self, const pmf_qual_t *qual)
675 {
676 	struct acpicpu_softc *sc = device_private(self);
677 
678 	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
679 		(void)acpicpu_cstate_suspend(self);
680 
681 	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
682 		(void)acpicpu_pstate_suspend(self);
683 
684 	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
685 		(void)acpicpu_tstate_suspend(self);
686 
687 	sc->sc_cold = true;
688 
689 	return true;
690 }
691 
692 static bool
693 acpicpu_resume(device_t self, const pmf_qual_t *qual)
694 {
695 	struct acpicpu_softc *sc = device_private(self);
696 	static const int handler = OSL_NOTIFY_HANDLER;
697 
698 	sc->sc_cold = false;
699 
700 	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
701 		(void)AcpiOsExecute(handler, acpicpu_cstate_resume, self);
702 
703 	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
704 		(void)AcpiOsExecute(handler, acpicpu_pstate_resume, self);
705 
706 	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
707 		(void)AcpiOsExecute(handler, acpicpu_tstate_resume, self);
708 
709 	return true;
710 }
711 
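/*
 * Attach an event counter for each usable C-, P-, and T-state.
 * The counters can be inspected with the -e flag of vmstat(1).
 */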
712 static void
713 acpicpu_evcnt_attach(device_t self)
714 {
715 	struct acpicpu_softc *sc = device_private(self);
716 	struct acpicpu_cstate *cs;
717 	struct acpicpu_pstate *ps;
718 	struct acpicpu_tstate *ts;
719 	const char *str;
720 	uint32_t i;
721 
722 	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
723 
724 		cs = &sc->sc_cstate[i];
725 
726 		if (cs->cs_method == 0)
727 			continue;
728 
729 		str = "HALT";
730 
731 		if (cs->cs_method == ACPICPU_C_STATE_FFH)
732 			str = "MWAIT";
733 
734 		if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
735 			str = "I/O";
736 
737 		(void)snprintf(cs->cs_name, sizeof(cs->cs_name),
738 		    "C%d (%s)", i, str);
739 
740 		evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
741 		    NULL, device_xname(sc->sc_dev), cs->cs_name);
742 	}
743 
744 	for (i = 0; i < sc->sc_pstate_count; i++) {
745 
746 		ps = &sc->sc_pstate[i];
747 
748 		if (ps->ps_freq == 0)
749 			continue;
750 
751 		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
752 		    "P%u (%u MHz)", i, ps->ps_freq);
753 
754 		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
755 		    NULL, device_xname(sc->sc_dev), ps->ps_name);
756 	}
757 
758 	for (i = 0; i < sc->sc_tstate_count; i++) {
759 
760 		ts = &sc->sc_tstate[i];
761 
762 		if (ts->ts_percent == 0)
763 			continue;
764 
765 		(void)snprintf(ts->ts_name, sizeof(ts->ts_name),
766 		    "T%u (%u %%)", i, ts->ts_percent);
767 
768 		evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC,
769 		    NULL, device_xname(sc->sc_dev), ts->ts_name);
770 	}
771 }
772 
773 static void
774 acpicpu_evcnt_detach(device_t self)
775 {
776 	struct acpicpu_softc *sc = device_private(self);
777 	struct acpicpu_cstate *cs;
778 	struct acpicpu_pstate *ps;
779 	struct acpicpu_tstate *ts;
780 	uint32_t i;
781 
782 	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
783 
784 		cs = &sc->sc_cstate[i];
785 
786 		if (cs->cs_method != 0)
787 			evcnt_detach(&cs->cs_evcnt);
788 	}
789 
790 	for (i = 0; i < sc->sc_pstate_count; i++) {
791 
792 		ps = &sc->sc_pstate[i];
793 
794 		if (ps->ps_freq != 0)
795 			evcnt_detach(&ps->ps_evcnt);
796 	}
797 
798 	for (i = 0; i < sc->sc_tstate_count; i++) {
799 
800 		ts = &sc->sc_tstate[i];
801 
802 		if (ts->ts_percent != 0)
803 			evcnt_detach(&ts->ts_evcnt);
804 	}
805 }
806 
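/*
 * Print the detected C-, P-, and T-states once, followed by
 * per-CPU debug information about the capabilities, flags,
 * and possible coordination dependencies.
 */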
807 static void
808 acpicpu_debug_print(device_t self)
809 {
810 	struct acpicpu_softc *sc = device_private(self);
811 	struct cpu_info *ci = sc->sc_ci;
812 	struct acpicpu_cstate *cs;
813 	struct acpicpu_pstate *ps;
814 	struct acpicpu_tstate *ts;
815 	static bool once = false;
816 	struct acpicpu_dep *dep;
817 	uint32_t i, method;
818 
819 	if (once != true) {
820 
821 		for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
822 
823 			cs = &sc->sc_cstate[i];
824 
825 			if (cs->cs_method == 0)
826 				continue;
827 
828 			aprint_verbose_dev(sc->sc_dev, "C%d: %3s, "
829 			    "lat %3u us, pow %5u mW%s\n", i,
830 			    acpicpu_debug_print_method(cs->cs_method),
831 			    cs->cs_latency, cs->cs_power,
832 			    (cs->cs_flags != 0) ? ", bus master check" : "");
833 		}
834 
835 		method = sc->sc_pstate_control.reg_spaceid;
836 
837 		for (i = 0; i < sc->sc_pstate_count; i++) {
838 
839 			ps = &sc->sc_pstate[i];
840 
841 			if (ps->ps_freq == 0)
842 				continue;
843 
844 			aprint_verbose_dev(sc->sc_dev, "P%d: %3s, "
845 			    "lat %3u us, pow %5u mW, %4u MHz%s\n", i,
846 			    acpicpu_debug_print_method(method),
847 			    ps->ps_latency, ps->ps_power, ps->ps_freq,
848 			    (ps->ps_flags & ACPICPU_FLAG_P_TURBO) != 0 ?
849 			    ", turbo boost" : "");
850 		}
851 
852 		method = sc->sc_tstate_control.reg_spaceid;
853 
854 		for (i = 0; i < sc->sc_tstate_count; i++) {
855 
856 			ts = &sc->sc_tstate[i];
857 
858 			if (ts->ts_percent == 0)
859 				continue;
860 
861 			aprint_verbose_dev(sc->sc_dev, "T%u: %3s, "
862 			    "lat %3u us, pow %5u mW, %3u %%\n", i,
863 			    acpicpu_debug_print_method(method),
864 			    ts->ts_latency, ts->ts_power, ts->ts_percent);
865 		}
866 
867 		once = true;
868 	}
869 
870 	aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, "
871 	    "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid,
872 	    (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags);
873 
874 	if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) {
875 
876 		dep = &sc->sc_cstate_dep;
877 
878 		aprint_debug_dev(sc->sc_dev, "C-state coordination: "
879 		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
880 		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
881 	}
882 
883 	if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) {
884 
885 		dep = &sc->sc_pstate_dep;
886 
887 		aprint_debug_dev(sc->sc_dev, "P-state coordination: "
888 		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
889 		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
890 	}
891 
892 	if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) {
893 
894 		dep = &sc->sc_tstate_dep;
895 
896 		aprint_debug_dev(sc->sc_dev, "T-state coordination: "
897 		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
898 		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
899 	}
900 }
901 
902 static const char *
903 acpicpu_debug_print_method(uint8_t val)
904 {
905 
906 	if (val == ACPICPU_C_STATE_FFH)
907 		return "FFH";
908 
909 	if (val == ACPICPU_C_STATE_HALT)
910 		return "HLT";
911 
912 	if (val == ACPICPU_C_STATE_SYSIO)
913 		return "I/O";
914 
915 	if (val == ACPI_ADR_SPACE_SYSTEM_IO)
916 		return "I/O";
917 
918 	if (val == ACPI_ADR_SPACE_FIXED_HARDWARE)
919 		return "FFH";
920 
921 	return "???";
922 }
923 
924 static const char *
925 acpicpu_debug_print_dep(uint32_t val)
926 {
927 
928 	switch (val) {
929 
930 	case ACPICPU_DEP_SW_ALL:
931 		return "SW_ALL";
932 
933 	case ACPICPU_DEP_SW_ANY:
934 		return "SW_ANY";
935 
936 	case ACPICPU_DEP_HW_ALL:
937 		return "HW_ALL";
938 
939 	default:
940 		return "unknown";
941 	}
942 }
943 
944 MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);
945 
946 #ifdef _MODULE
947 #include "ioconf.c"
948 #endif
949 
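/*
 * The module(9) glue; when built as a module, the driver is
 * registered and unregistered via the ioconf machinery.
 */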
950 static int
951 acpicpu_modcmd(modcmd_t cmd, void *aux)
952 {
953 	int rv = 0;
954 
955 	switch (cmd) {
956 
957 	case MODULE_CMD_INIT:
958 
959 #ifdef _MODULE
960 		rv = config_init_component(cfdriver_ioconf_acpicpu,
961 		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
962 #endif
963 		break;
964 
965 	case MODULE_CMD_FINI:
966 
967 #ifdef _MODULE
968 		rv = config_fini_component(cfdriver_ioconf_acpicpu,
969 		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
970 #endif
971 		break;
972 
973 	default:
974 		rv = ENOTTY;
975 	}
976 
977 	return rv;
978 }
979