/* $NetBSD: acpi_cpu.c,v 1.25 2011/01/13 04:18:19 jruoho Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.25 2011/01/13 04:18:19 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/sysctl.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#include <machine/acpi_machdep.h>

#define _COMPONENT	  ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	  ("acpi_cpu")

static int		  acpicpu_match(device_t, cfdata_t, void *);
static void		  acpicpu_attach(device_t, device_t, void *);
static int		  acpicpu_detach(device_t, int);
static int		  acpicpu_once_attach(void);
static int		  acpicpu_once_detach(void);
static void		  acpicpu_prestart(device_t);
static void		  acpicpu_start(device_t);
static void		  acpicpu_sysctl(device_t);

static int		  acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static cpuid_t		  acpicpu_id(uint32_t);
static uint32_t		  acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	  acpicpu_cap_pdc(struct acpicpu_softc *, uint32_t);
static ACPI_STATUS	  acpicpu_cap_osc(struct acpicpu_softc *,
					  uint32_t, uint32_t *);
static void		  acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		  acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		  acpicpu_resume(device_t, const pmf_qual_t *);

extern uint32_t		  acpi_cpus;
struct acpicpu_softc	**acpicpu_sc = NULL;
static struct sysctllog	 *acpicpu_log = NULL;
static bool		  acpicpu_dynamic = true;
static bool		  acpicpu_passive = true;

static const char * const acpicpu_hid[] = {
	"ACPI0007",
	NULL
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);

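/*
 * Match a Processor object, either by the "ACPI0007" HID or
 * by verifying that the object maps to a valid ACPI CPU ID.
 */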
static int
acpicpu_match(device_t parent, cfdata_t match, void *aux)
{
	struct acpi_attach_args *aa = aux;
	struct acpicpu_object ao;
	int rv;

	if (aa->aa_node->ad_type != ACPI_TYPE_PROCESSOR)
		return 0;

	if (acpi_match_hid(aa->aa_node->ad_devinfo, acpicpu_hid) != 0)
		return 1;

	rv = acpicpu_object(aa->aa_node->ad_handle, &ao);

	if (rv != 0 || acpicpu_id(ao.ao_procid) == 0xFFFFFF)
		return 0;

	return 1;
}

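/*
 * Attach an ACPI CPU: record the Processor object, verify the
 * CPU ID, query the capabilities and machine-dependent quirks,
 * and hook up the C-, P-, and T-state code, the notify handler,
 * and the power management callbacks.
 */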
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpi_attach_args *aa = aux;
	static ONCE_DECL(once_attach);
	int rv;

	rv = acpicpu_object(aa->aa_node->ad_handle, &sc->sc_object);

	if (rv != 0)
		return;

	rv = RUN_ONCE(&once_attach, acpicpu_once_attach);

	if (rv != 0)
		return;

	sc->sc_dev = self;
	sc->sc_cold = true;
	sc->sc_node = aa->aa_node;
	sc->sc_cpuid = acpicpu_id(sc->sc_object.ao_procid);

	if (sc->sc_cpuid == 0xFFFFFF) {
		aprint_error(": invalid CPU ID\n");
		return;
	}

	if (acpicpu_sc[sc->sc_cpuid] != NULL) {
		aprint_error(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	acpi_cpus++;
	acpicpu_sc[sc->sc_cpuid] = sc;

	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_flags |= acpicpu_md_quirks();

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	(void)config_defer(self, acpicpu_prestart);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}

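/*
 * Detach an ACPI CPU: deregister the notify handler and back
 * out the C-, P-, and T-state attachments before releasing the
 * remaining resources.
 */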
static int
acpicpu_detach(device_t self, int flags)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	int rv = 0;

	sc->sc_cold = true;
	acpi_deregister_notify(sc->sc_node);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		rv = acpicpu_cstate_detach(self);

	if (rv != 0)
		return rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		rv = acpicpu_pstate_detach(self);

	if (rv != 0)
		return rv;

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		rv = acpicpu_tstate_detach(self);

	if (rv != 0)
		return rv;

	rv = RUN_ONCE(&once_detach, acpicpu_once_detach);

	if (rv != 0)
		return rv;

	mutex_destroy(&sc->sc_mtx);
	acpi_cpus--;

	return 0;
}

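/*
 * Run once by the first CPU to attach: allocate the global
 * array that maps CPU IDs to their softc pointers.
 */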
static int
acpicpu_once_attach(void)
{
	struct acpicpu_softc *sc;
	unsigned int i;

	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);

	if (acpicpu_sc == NULL)
		return ENOMEM;

	for (i = 0; i < maxcpus; i++)
		acpicpu_sc[i] = NULL;

	return 0;
}

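/*
 * Run once on detach: release the global softc array and
 * tear down the sysctl tree.
 */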
static int
acpicpu_once_detach(void)
{
	struct acpicpu_softc *sc;

	if (acpicpu_sc != NULL)
		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));

	if (acpicpu_log != NULL)
		sysctl_teardown(&acpicpu_log);

	return 0;
}

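/*
 * Deferred from attach: the first caller schedules
 * acpicpu_start() via config_interrupts(9), the rest
 * simply clear their cold flag.
 */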
static void
acpicpu_prestart(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static bool once = false;

	if (once != false) {
		sc->sc_cold = false;
		return;
	}

	once = true;

	(void)config_interrupts(self, acpicpu_start);
}

static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	/*
	 * Run the state-specific initialization
	 * routines. These should be called only
	 * once, after interrupts are enabled and
	 * all ACPI CPUs have attached.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	acpicpu_sysctl(self);

	aprint_debug_dev(sc->sc_dev, "ACPI CPUs started (cap "
	    "0x%02x, flags 0x%06x)\n", sc->sc_cap, sc->sc_flags);

	sc->sc_cold = false;
}

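/*
 * Create the hw.acpi.cpu sysctl tree with the "dynamic"
 * and "passive" tunables.
 */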
static void
acpicpu_sysctl(device_t self)
{
	const struct sysctlnode *node;
	int err;

	err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	return;

fail:
	aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
}

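/*
 * Evaluate the Processor object behind the handle and extract
 * the ACPI processor ID together with the P_BLK address and
 * length.  Returns non-zero on failure.
 */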
static int
acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
{
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	rv = acpi_eval_struct(hdl, NULL, &buf);

	if (ACPI_FAILURE(rv))
		return 1;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PROCESSOR) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
		rv = AE_LIMIT;
		goto out;
	}

	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);

	if (ao != NULL) {
		ao->ao_procid = obj->Processor.ProcId;
		ao->ao_pblklen = obj->Processor.PblkLength;
		ao->ao_pblkaddr = obj->Processor.PblkAddress;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return ACPI_FAILURE(rv) ? 1 : 0;
}

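/*
 * Match the ACPI processor ID against the ACPI IDs recorded
 * in the cpu_info structures.  Returns 0xFFFFFF if no CPU
 * matches.
 */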
static cpuid_t
acpicpu_id(uint32_t id)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {

		if (id == ci->ci_acpiid)
			return id;
	}

	return 0xFFFFFF;
}

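/*
 * Negotiate the supported capabilities with the firmware
 * via the _OSC and _PDC methods.
 */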
static uint32_t
acpicpu_cap(struct acpicpu_softc *sc)
{
	uint32_t flags, cap = 0;
	const char *str;
	ACPI_STATUS rv;

	/*
	 * Query and set machine-dependent capabilities.
	 * Note that the Intel-specific _PDC method was
	 * deprecated in ACPI 3.0 in favor of _OSC.
	 */
	flags = acpicpu_md_cap();
	rv = acpicpu_cap_osc(sc, flags, &cap);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
		str = "_OSC";
		goto fail;
	}

	rv = acpicpu_cap_pdc(sc, flags);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
		str = "_PDC";
		goto fail;
	}

	if (cap == 0)
		cap = flags;

	return cap;

fail:
	aprint_error_dev(sc->sc_dev, "failed to evaluate "
	    "%s: %s\n", str, AcpiFormatException(rv));

	return 0;
}

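/*
 * Evaluate _PDC ("Processor Driver Capabilities") with the
 * capability bits gathered from the machine-dependent code.
 */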
static ACPI_STATUS
acpicpu_cap_pdc(struct acpicpu_softc *sc, uint32_t flags)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj;
	uint32_t cap[3];

	arg.Count = 1;
	arg.Pointer = &obj;

	cap[0] = ACPICPU_PDC_REVID;
	cap[1] = 1;
	cap[2] = flags;

	obj.Type = ACPI_TYPE_BUFFER;
	obj.Buffer.Length = sizeof(cap);
	obj.Buffer.Pointer = (void *)cap;

	return AcpiEvaluateObject(sc->sc_node->ad_handle, "_PDC", &arg, NULL);
}

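/*
 * Evaluate _OSC ("Operating System Capabilities") using the
 * Intel processor UUID, first with the query flag set and
 * then again to commit the negotiated capabilities.
 */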
static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;

	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 *  _OSC with the Query Support Flag set until _OSC
	 *  returns the Capabilities Masked bit clear, to
	 *  negotiate the set of features to be granted to
	 *  the OS for native support" (ACPI 4.0, 6.2.10).
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on a per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

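/*
 * ACPI notify handler.  Dispatch C-, P-, and T-state change
 * notifications to the matching callback via AcpiOsExecute().
 */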
static void
acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if (sc->sc_cold != false)
		return;

	if (acpicpu_dynamic != true)
		return;

	switch (evt) {

	case ACPICPU_C_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
			return;

		func = acpicpu_cstate_callback;
		break;

	case ACPICPU_P_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
			return;

		func = acpicpu_pstate_callback;
		break;

	case ACPICPU_T_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
			return;

		func = acpicpu_tstate_callback;
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
		return;
	}

	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
}

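/*
 * PMF suspend hook: let each state-specific module prepare
 * for suspend and mark the CPU as cold.
 */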
static bool
acpicpu_suspend(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_suspend(self);

	sc->sc_cold = true;

	return true;
}

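/*
 * PMF resume hook: clear the cold flag and restore the
 * C-, P-, and T-states.
 */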
static bool
acpicpu_resume(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_resume(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_resume(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_resume(self);

	return true;
}

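/*
 * Module glue for loading acpicpu(4) as a kernel module;
 * the cfdata below attaches the driver under acpinodebus.
 */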
#ifdef _MODULE

MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);
CFDRIVER_DECL(acpicpu, DV_DULL, NULL);

static int acpicpuloc[] = { -1 };
extern struct cfattach acpicpu_ca;

static struct cfparent acpiparent = {
	"acpinodebus", NULL, DVUNIT_ANY
};

static struct cfdata acpicpu_cfdata[] = {
	{
		.cf_name = "acpicpu",
		.cf_atname = "acpicpu",
		.cf_unit = 0,
		.cf_fstate = FSTATE_STAR,
		.cf_loc = acpicpuloc,
		.cf_flags = 0,
		.cf_pspec = &acpiparent,
	},

	{ NULL, NULL, 0, 0, NULL, 0, NULL }
};

static int
acpicpu_modcmd(modcmd_t cmd, void *context)
{
	int err;

	switch (cmd) {

	case MODULE_CMD_INIT:

		err = config_cfdriver_attach(&acpicpu_cd);

		if (err != 0)
			return err;

		err = config_cfattach_attach("acpicpu", &acpicpu_ca);

		if (err != 0) {
			config_cfdriver_detach(&acpicpu_cd);
			return err;
		}

		err = config_cfdata_attach(acpicpu_cfdata, 1);

		if (err != 0) {
			config_cfattach_detach("acpicpu", &acpicpu_ca);
			config_cfdriver_detach(&acpicpu_cd);
			return err;
		}

		return 0;

	case MODULE_CMD_FINI:

		err = config_cfdata_detach(acpicpu_cfdata);

		if (err != 0)
			return err;

		config_cfattach_detach("acpicpu", &acpicpu_ca);
		config_cfdriver_detach(&acpicpu_cd);

		return 0;

	default:
		return ENOTTY;
	}
}

#endif	/* _MODULE */