1 /*- 2 * Copyright (c) 2000 Takanori Watanabe <takawata@jp.kfreebsd.org> 3 * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.kfreebsd.org> 4 * Copyright (c) 2000, 2001 Michael Smith 5 * Copyright (c) 2000 BSDi 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * $FreeBSD: src/sys/dev/acpica/acpi.c,v 1.243.2.4.4.1 2009/04/15 03:14:26 kensmith Exp $ 30 */ 31 32 #include "opt_acpi.h" 33 #include <sys/param.h> 34 #include <sys/kernel.h> 35 #include <sys/proc.h> 36 #include <sys/fcntl.h> 37 #include <sys/malloc.h> 38 #include <sys/module.h> 39 #include <sys/bus.h> 40 #include <sys/conf.h> 41 #include <sys/reboot.h> 42 #include <sys/sysctl.h> 43 #include <sys/ctype.h> 44 #include <sys/linker.h> 45 #include <sys/power.h> 46 #include <sys/sbuf.h> 47 #include <sys/device.h> 48 #include <sys/spinlock.h> 49 #include <sys/spinlock2.h> 50 #include <sys/uuid.h> 51 52 #include <sys/rman.h> 53 #include <bus/isa/isavar.h> 54 #include <bus/isa/pnpvar.h> 55 56 #include "acpi.h" 57 #include <dev/acpica/acpivar.h> 58 #include <dev/acpica/acpiio.h> 59 #include <dev/acpica/acpiio_mcall.h> 60 #include "achware.h" 61 #include "acnamesp.h" 62 #include "acglobal.h" 63 64 #include "pci_if.h" 65 #include <bus/pci/pci_cfgreg.h> 66 #include <bus/pci/pcivar.h> 67 #include <bus/pci/pci_private.h> 68 69 #include <vm/vm_param.h> 70 71 MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); 72 73 /* Hooks for the ACPICA debugging infrastructure */ 74 #define _COMPONENT ACPI_BUS 75 ACPI_MODULE_NAME("ACPI"); 76 77 static d_open_t acpiopen; 78 static d_close_t acpiclose; 79 static d_ioctl_t acpiioctl; 80 81 static struct dev_ops acpi_ops = { 82 { "acpi", 0, 0 }, 83 .d_open = acpiopen, 84 .d_close = acpiclose, 85 .d_ioctl = acpiioctl 86 }; 87 88 struct acpi_interface { 89 ACPI_STRING *data; 90 int num; 91 }; 92 93 /* Global mutex for locking access to the ACPI subsystem. */ 94 struct lock acpi_lock; 95 96 /* Bitmap of device quirks. 
*/ 97 int acpi_quirks; 98 99 static int acpi_modevent(struct module *mod, int event, void *junk); 100 static void acpi_identify(driver_t *driver, device_t parent); 101 static int acpi_probe(device_t dev); 102 static int acpi_attach(device_t dev); 103 static int acpi_suspend(device_t dev); 104 static int acpi_resume(device_t dev); 105 static int acpi_shutdown(device_t dev); 106 static device_t acpi_add_child(device_t bus, device_t parent, int order, const char *name, 107 int unit); 108 static int acpi_print_child(device_t bus, device_t child); 109 static void acpi_probe_nomatch(device_t bus, device_t child); 110 static void acpi_driver_added(device_t dev, driver_t *driver); 111 static int acpi_read_ivar(device_t dev, device_t child, int index, 112 uintptr_t *result); 113 static int acpi_write_ivar(device_t dev, device_t child, int index, 114 uintptr_t value); 115 static struct resource_list *acpi_get_rlist(device_t dev, device_t child); 116 static int acpi_sysres_alloc(device_t dev); 117 static struct resource *acpi_alloc_resource(device_t bus, device_t child, 118 int type, int *rid, u_long start, u_long end, 119 u_long count, u_int flags, int cpuid); 120 static int acpi_release_resource(device_t bus, device_t child, int type, 121 int rid, struct resource *r); 122 static void acpi_delete_resource(device_t bus, device_t child, int type, 123 int rid); 124 static uint32_t acpi_isa_get_logicalid(device_t dev); 125 static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count); 126 static char *acpi_device_id_probe(device_t bus, device_t dev, char **ids); 127 static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev, 128 ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters, 129 ACPI_BUFFER *ret); 130 static int acpi_device_pwr_for_sleep(device_t bus, device_t dev, 131 int *dstate); 132 static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, 133 void *context, void **retval); 134 static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev, 135 int max_depth, acpi_scan_cb_t user_fn, void *arg); 136 static int acpi_set_powerstate_method(device_t bus, device_t child, 137 int state); 138 static int acpi_isa_pnp_probe(device_t bus, device_t child, 139 struct isa_pnp_id *ids); 140 static void acpi_probe_children(device_t bus); 141 static void acpi_probe_order(ACPI_HANDLE handle, int *order); 142 static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, 143 void *context, void **status); 144 static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state); 145 static void acpi_shutdown_final(void *arg, int howto); 146 static void acpi_enable_fixed_events(struct acpi_softc *sc); 147 static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate); 148 static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate); 149 static int acpi_wake_prep_walk(int sstate); 150 static int acpi_wake_sysctl_walk(device_t dev); 151 #ifdef notyet 152 static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS); 153 #endif 154 static void acpi_system_eventhandler_sleep(void *arg, int state); 155 static void acpi_system_eventhandler_wakeup(void *arg, int state); 156 static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); 157 static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); 158 static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS); 159 static int acpi_pm_func(u_long cmd, void *arg, ...); 160 static int acpi_child_location_str_method(device_t acdev, device_t child, 161 char *buf, size_t buflen); 162 static int acpi_child_pnpinfo_str_method(device_t 
acdev, device_t child, 163 char *buf, size_t buflen); 164 static void acpi_enable_pcie(void); 165 static void acpi_reset_interfaces(device_t dev); 166 167 static device_method_t acpi_methods[] = { 168 /* Device interface */ 169 DEVMETHOD(device_identify, acpi_identify), 170 DEVMETHOD(device_probe, acpi_probe), 171 DEVMETHOD(device_attach, acpi_attach), 172 DEVMETHOD(device_shutdown, acpi_shutdown), 173 DEVMETHOD(device_detach, bus_generic_detach), 174 DEVMETHOD(device_suspend, acpi_suspend), 175 DEVMETHOD(device_resume, acpi_resume), 176 177 /* Bus interface */ 178 DEVMETHOD(bus_add_child, acpi_add_child), 179 DEVMETHOD(bus_print_child, acpi_print_child), 180 DEVMETHOD(bus_probe_nomatch, acpi_probe_nomatch), 181 DEVMETHOD(bus_driver_added, acpi_driver_added), 182 DEVMETHOD(bus_read_ivar, acpi_read_ivar), 183 DEVMETHOD(bus_write_ivar, acpi_write_ivar), 184 DEVMETHOD(bus_get_resource_list, acpi_get_rlist), 185 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), 186 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), 187 DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), 188 DEVMETHOD(bus_release_resource, acpi_release_resource), 189 DEVMETHOD(bus_delete_resource, acpi_delete_resource), 190 DEVMETHOD(bus_child_pnpinfo_str, acpi_child_pnpinfo_str_method), 191 DEVMETHOD(bus_child_location_str, acpi_child_location_str_method), 192 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), 193 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), 194 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), 195 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), 196 197 /* ACPI bus */ 198 DEVMETHOD(acpi_id_probe, acpi_device_id_probe), 199 DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj), 200 DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep), 201 DEVMETHOD(acpi_scan_children, acpi_device_scan_children), 202 203 /* PCI emulation */ 204 DEVMETHOD(pci_set_powerstate, acpi_set_powerstate_method), 205 206 /* ISA emulation */ 207 DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe), 208 209 DEVMETHOD_END 210 }; 211 212 static driver_t acpi_driver = { 213 "acpi", 214 acpi_methods, 215 sizeof(struct acpi_softc), 216 }; 217 218 static devclass_t acpi_devclass; 219 DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, NULL); 220 MODULE_VERSION(acpi, 1); 221 222 ACPI_SERIAL_DECL(acpi, "ACPI serializer"); 223 224 /* Local pools for managing system resources for ACPI child devices. */ 225 static struct rman acpi_rman_io, acpi_rman_mem; 226 227 #define ACPI_MINIMUM_AWAKETIME 5 228 229 static const char* sleep_state_names[] = { 230 "S0", "S1", "S2", "S3", "S4", "S5", "NONE"}; 231 232 SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD, NULL, "ACPI debugging"); 233 static char acpi_ca_version[12]; 234 SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD, 235 acpi_ca_version, 0, "Version of Intel ACPICA"); 236 237 /* 238 * Allow overriding _OSI methods. 239 */ 240 static char acpi_install_interface[256]; 241 TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface, 242 sizeof(acpi_install_interface)); 243 static char acpi_remove_interface[256]; 244 TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface, 245 sizeof(acpi_remove_interface)); 246 247 /* 248 * Use this tunable to disable the control method auto-serialization 249 * mechanism that was added in 20140214 and superseded the previous 250 * AcpiGbl_SerializeAllMethods global. 
251 */ 252 static int acpi_auto_serialize_methods = 1; 253 TUNABLE_INT("hw.acpi.auto_serialize_methods", &acpi_auto_serialize_methods); 254 255 /* Allow users to dump Debug objects without ACPI debugger. */ 256 static int acpi_debug_objects; 257 TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects); 258 SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects, 259 CTLFLAG_RW | CTLTYPE_INT, NULL, 0, acpi_debug_objects_sysctl, "I", 260 "Enable Debug objects."); 261 262 /* Allow ignoring the XSDT. */ 263 static int acpi_ignore_xsdt; 264 TUNABLE_INT("debug.acpi.ignore_xsdt", &acpi_ignore_xsdt); 265 SYSCTL_INT(_debug_acpi, OID_AUTO, ignore_xsdt, CTLFLAG_RD, 266 &acpi_ignore_xsdt, 1, "Ignore the XSDT, forcing the use of the RSDT."); 267 268 /* Allow the interpreter to ignore common mistakes in BIOS. */ 269 static int acpi_interpreter_slack = 1; 270 TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack); 271 SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RD, 272 &acpi_interpreter_slack, 1, "Turn on interpreter slack mode."); 273 274 /* Allow preferring 32-bit FADT register addresses over the 64-bit ones. */ 275 static int acpi_fadt_addr32; 276 TUNABLE_INT("debug.acpi.fadt_addr32", &acpi_fadt_addr32); 277 SYSCTL_INT(_debug_acpi, OID_AUTO, fadt_addr32, CTLFLAG_RD, 278 &acpi_fadt_addr32, 1, 279 "Prefer 32-bit FADT register addresses over 64-bit ones."); 280 281 /* Prefer 32-bit FACS table addresses over the 64-bit ones. */ 282 static int acpi_facs_addr32 = 1; 283 TUNABLE_INT("debug.acpi.facs_addr32", &acpi_facs_addr32); 284 SYSCTL_INT(_debug_acpi, OID_AUTO, facs_addr32, CTLFLAG_RD, 285 &acpi_facs_addr32, 1, 286 "Prefer 32-bit FACS table addresses over 64-bit ones."); 287 288 static int acpi_group_module_level_code; 289 TUNABLE_INT("debug.acpi.group_module_level_code", &acpi_group_module_level_code); 290 SYSCTL_INT(_debug_acpi, OID_AUTO, group_module_level_code, CTLFLAG_RD, 291 &acpi_group_module_level_code, 1, 292 "Group module-level code."); 293 294 /* Power devices off and on in suspend and resume. XXX Remove once tested. */ 295 static int acpi_do_powerstate = 1; 296 TUNABLE_INT("debug.acpi.do_powerstate", &acpi_do_powerstate); 297 SYSCTL_INT(_debug_acpi, OID_AUTO, do_powerstate, CTLFLAG_RW, 298 &acpi_do_powerstate, 1, "Turn off devices when suspending."); 299 300 /* Allow users to override quirks. */ 301 TUNABLE_INT("debug.acpi.quirks", &acpi_quirks); 302 303 /* Allow to call ACPI methods from userland. */ 304 static int acpi_allow_mcall; 305 TUNABLE_INT("debug.acpi.allow_method_calls", &acpi_allow_mcall); 306 307 static int acpi_susp_bounce; 308 SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW, 309 &acpi_susp_bounce, 0, "Don't actually suspend, just test devices."); 310 311 /* 312 * ACPI can only be loaded as a module by the loader; activating it after 313 * system bootstrap time is not useful, and can be fatal to the system. 314 * It also cannot be unloaded, since the entire system bus heirarchy hangs 315 * off it. 316 */ 317 static int 318 acpi_modevent(struct module *mod, int event, void *junk) 319 { 320 switch (event) { 321 case MOD_LOAD: 322 if (!cold) { 323 kprintf("The ACPI driver cannot be loaded after boot.\n"); 324 return (EPERM); 325 } 326 break; 327 case MOD_UNLOAD: 328 if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI) 329 return (EBUSY); 330 break; 331 default: 332 break; 333 } 334 return (0); 335 } 336 337 /* 338 * Perform early initialization. 
339 */ 340 ACPI_STATUS 341 acpi_Startup(void) 342 { 343 static int started = 0; 344 ACPI_STATUS status; 345 int val; 346 347 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 348 349 /* Only run the startup code once. The MADT driver also calls this. */ 350 if (started) 351 return_VALUE (AE_OK); 352 started = 1; 353 354 /* 355 * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing 356 * if more tables exist. 357 */ 358 if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) { 359 kprintf("ACPI: Table initialisation failed: %s\n", 360 AcpiFormatException(status)); 361 return_VALUE (status); 362 } 363 364 /* Set up any quirks we have for this system. */ 365 if (acpi_quirks == ACPI_Q_OK) 366 acpi_table_quirks(&acpi_quirks); 367 368 /* If the user manually set the disabled hint to 0, force-enable ACPI. */ 369 if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0) 370 acpi_quirks &= ~ACPI_Q_BROKEN; 371 if (acpi_quirks & ACPI_Q_BROKEN) { 372 kprintf("ACPI disabled by blacklist. Contact your BIOS vendor.\n"); 373 status = AE_SUPPORT; 374 } 375 376 return_VALUE (status); 377 } 378 379 /* 380 * Detect ACPI, perform early initialisation 381 */ 382 static void 383 acpi_identify(driver_t *driver, device_t parent) 384 { 385 device_t child; 386 387 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 388 389 if (!cold) 390 return_VOID; 391 392 /* Check that we haven't been disabled with a hint. */ 393 if (resource_disabled("acpi", 0)) 394 return_VOID; 395 396 /* Make sure we're not being doubly invoked. */ 397 if (device_find_child(parent, "acpi", 0) != NULL) 398 return_VOID; 399 400 ksnprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION); 401 402 /* Initialize root tables. */ 403 if (ACPI_FAILURE(acpi_Startup())) { 404 kprintf("ACPI: Try disabling either ACPI or apic support.\n"); 405 return_VOID; 406 } 407 408 /* Attach the actual ACPI device. */ 409 if ((child = BUS_ADD_CHILD(parent, parent, 10, "acpi", 0)) == NULL) { 410 device_printf(parent, "device_identify failed\n"); 411 return_VOID; 412 } 413 } 414 415 /* 416 * Fetch some descriptive data from ACPI to put in our attach message. 
417 */ 418 static int 419 acpi_probe(device_t dev) 420 { 421 ACPI_TABLE_RSDP *rsdp; 422 ACPI_TABLE_HEADER *rsdt; 423 ACPI_PHYSICAL_ADDRESS paddr; 424 char buf[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2]; 425 struct sbuf sb; 426 427 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 428 429 if (power_pm_get_type() != POWER_PM_TYPE_NONE && 430 power_pm_get_type() != POWER_PM_TYPE_ACPI) { 431 device_printf(dev, "probe failed, other PM system enabled.\n"); 432 return_VALUE (ENXIO); 433 } 434 435 if ((paddr = AcpiOsGetRootPointer()) == 0 || 436 (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL) 437 return_VALUE (ENXIO); 438 if (acpi_ignore_xsdt == 0 && 439 rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0) 440 paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress; 441 else 442 paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress; 443 AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP)); 444 445 if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL) 446 return_VALUE (ENXIO); 447 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); 448 sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE); 449 sbuf_trim(&sb); 450 sbuf_putc(&sb, ' '); 451 sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE); 452 sbuf_trim(&sb); 453 sbuf_finish(&sb); 454 device_set_desc_copy(dev, sbuf_data(&sb)); 455 sbuf_delete(&sb); 456 AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER)); 457 458 return_VALUE (0); 459 } 460 461 static int 462 acpi_attach(device_t dev) 463 { 464 struct acpi_softc *sc; 465 ACPI_STATUS status; 466 int error, state; 467 UINT32 flags; 468 UINT8 TypeA, TypeB; 469 char *env; 470 471 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 472 473 sc = device_get_softc(dev); 474 sc->acpi_dev = dev; 475 callout_init(&sc->susp_force_to); 476 477 if ((error = acpi_task_thread_init())) { 478 device_printf(dev, "Could not start task thread.\n"); 479 goto out; 480 } 481 482 error = ENXIO; 483 484 /* Initialize resource manager. */ 485 acpi_rman_io.rm_type = RMAN_ARRAY; 486 acpi_rman_io.rm_start = 0; 487 acpi_rman_io.rm_end = 0xffff; 488 acpi_rman_io.rm_descr = "ACPI I/O ports"; 489 if (rman_init(&acpi_rman_io, -1) != 0) 490 panic("acpi rman_init IO ports failed"); 491 acpi_rman_mem.rm_type = RMAN_ARRAY; 492 acpi_rman_mem.rm_start = 0; 493 acpi_rman_mem.rm_end = ~0ul; 494 acpi_rman_mem.rm_descr = "ACPI I/O memory addresses"; 495 if (rman_init(&acpi_rman_mem, -1) != 0) 496 panic("acpi rman_init memory failed"); 497 498 /* Initialise the ACPI mutex */ 499 ACPI_LOCK_INIT(acpi, "acpi"); 500 ACPI_SERIAL_INIT(acpi); 501 502 /* 503 * Set the globals from our tunables. This is needed because ACPICA 504 * uses UINT8 for some values and we have no tunable_byte. 505 */ 506 AcpiGbl_AutoSerializeMethods = acpi_auto_serialize_methods ? TRUE : FALSE; 507 AcpiGbl_DoNotUseXsdt = acpi_ignore_xsdt ? TRUE : FALSE; 508 AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE; 509 AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE; 510 AcpiGbl_GroupModuleLevelCode = acpi_group_module_level_code ? TRUE : FALSE; 511 AcpiGbl_Use32BitFadtAddresses = acpi_fadt_addr32 ? TRUE : FALSE; 512 AcpiGbl_Use32BitFacsAddresses = acpi_facs_addr32 ? TRUE : FALSE; 513 514 #ifndef ACPI_DEBUG 515 /* 516 * Disable Debug Object output. 517 */ 518 AcpiDbgLevel &= ~ACPI_LV_DEBUG_OBJECT; 519 #endif 520 521 /* Start up the ACPICA subsystem. 
*/ 522 status = AcpiInitializeSubsystem(); 523 if (ACPI_FAILURE(status)) { 524 device_printf(dev, "Could not initialize Subsystem: %s\n", 525 AcpiFormatException(status)); 526 goto out; 527 } 528 529 /* Override OS interfaces if the user requested. */ 530 acpi_reset_interfaces(dev); 531 532 /* Load ACPI name space. */ 533 status = AcpiLoadTables(); 534 if (ACPI_FAILURE(status)) { 535 device_printf(dev, "Could not load Namespace: %s\n", 536 AcpiFormatException(status)); 537 goto out; 538 } 539 540 /* Handle MCFG table if present. */ 541 acpi_enable_pcie(); 542 543 /* 544 * Note that some systems (specifically, those with namespace evaluation 545 * issues that require the avoidance of parts of the namespace) must 546 * avoid running _INI and _STA on everything, as well as dodging the final 547 * object init pass. 548 * 549 * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT). 550 * 551 * XXX We should arrange for the object init pass after we have attached 552 * all our child devices, but on many systems it works here. 553 */ 554 flags = ACPI_FULL_INITIALIZATION; 555 if (ktestenv("debug.acpi.avoid")) 556 flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; 557 558 /* Bring the hardware and basic handlers online. */ 559 if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) { 560 device_printf(dev, "Could not enable ACPI: %s\n", 561 AcpiFormatException(status)); 562 goto out; 563 } 564 565 /* 566 * Fix up the interrupt timer after enabling ACPI, so that the 567 * interrupt cputimer that choked by ACPI power management could 568 * be resurrected before probing various devices. 569 */ 570 DELAY(5000); 571 cputimer_intr_pmfixup(); 572 573 /* 574 * Call the ECDT probe function to provide EC functionality before 575 * the namespace has been evaluated. 576 * 577 * XXX This happens before the sysresource devices have been probed and 578 * attached so its resources come from nexus0. In practice, this isn't 579 * a problem but should be addressed eventually. 580 */ 581 acpi_ec_ecdt_probe(dev); 582 583 /* Bring device objects and regions online. */ 584 if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) { 585 device_printf(dev, "Could not initialize ACPI objects: %s\n", 586 AcpiFormatException(status)); 587 goto out; 588 } 589 590 /* 591 * Setup our sysctl tree. 592 * 593 * XXX: This doesn't check to make sure that none of these fail. 
594 */ 595 sysctl_ctx_init(&sc->acpi_sysctl_ctx); 596 sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx, 597 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, 598 device_get_name(dev), CTLFLAG_RD, 0, ""); 599 SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 600 OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD, 601 0, 0, acpi_supported_sleep_state_sysctl, "A", ""); 602 SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 603 OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW, 604 &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", ""); 605 SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 606 OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW, 607 &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", ""); 608 SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 609 OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW, 610 &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", ""); 611 SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 612 OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW, 613 &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", ""); 614 SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 615 OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW, 616 &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", ""); 617 SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 618 OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0, 619 "sleep delay"); 620 SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 621 OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode"); 622 SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 623 OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode"); 624 SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 625 OID_AUTO, "disable_on_reboot", CTLFLAG_RW, 626 &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system"); 627 SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), 628 OID_AUTO, "handle_reboot", CTLFLAG_RW, 629 &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot"); 630 631 /* 632 * Default to 1 second before sleeping to give some machines time to 633 * stabilize. 634 */ 635 sc->acpi_sleep_delay = 1; 636 if (bootverbose) 637 sc->acpi_verbose = 1; 638 if ((env = kgetenv("hw.acpi.verbose")) != NULL) { 639 if (strcmp(env, "0") != 0) 640 sc->acpi_verbose = 1; 641 kfreeenv(env); 642 } 643 644 /* Only enable reboot by default if the FADT says it is available. */ 645 if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER) 646 sc->acpi_handle_reboot = 1; 647 648 /* Only enable S4BIOS by default if the FACS says it is available. */ 649 if (AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT) 650 sc->acpi_s4bios = 1; 651 652 /* 653 * Dispatch the default sleep state to devices. The lid switch is set 654 * to NONE by default to avoid surprising users. 655 */ 656 sc->acpi_power_button_sx = ACPI_STATE_S5; 657 sc->acpi_lid_switch_sx = ACPI_S_STATES_MAX + 1; 658 sc->acpi_standby_sx = ACPI_STATE_S1; 659 sc->acpi_suspend_sx = ACPI_STATE_S3; 660 661 /* Pick the first valid sleep state for the sleep button default. 
*/ 662 sc->acpi_sleep_button_sx = ACPI_S_STATES_MAX + 1; 663 for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++) 664 if (ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) { 665 sc->acpi_sleep_button_sx = state; 666 break; 667 } 668 669 acpi_enable_fixed_events(sc); 670 671 /* 672 * Scan the namespace and attach/initialise children. 673 */ 674 675 /* Register our shutdown handler. */ 676 EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc, 677 SHUTDOWN_PRI_LAST); 678 679 /* 680 * Register our acpi event handlers. 681 * XXX should be configurable eg. via userland policy manager. 682 */ 683 EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep, 684 sc, ACPI_EVENT_PRI_LAST); 685 EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup, 686 sc, ACPI_EVENT_PRI_LAST); 687 688 /* Flag our initial states. */ 689 sc->acpi_enabled = 1; 690 sc->acpi_sstate = ACPI_STATE_S0; 691 sc->acpi_sleep_disabled = 0; 692 /* Create the control device */ 693 sc->acpi_dev_t = make_dev(&acpi_ops, 0, UID_ROOT, GID_WHEEL, 0644, "acpi"); 694 sc->acpi_dev_t->si_drv1 = sc; 695 696 if ((error = acpi_machdep_init(dev))) 697 goto out; 698 699 /* Register ACPI again to pass the correct argument of pm_func. */ 700 power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc); 701 702 if (!acpi_disabled("bus")) 703 acpi_probe_children(dev); 704 705 /* Update all GPEs and enable runtime GPEs. */ 706 status = AcpiUpdateAllGpes(); 707 if (ACPI_FAILURE(status)) { 708 device_printf(dev, "Could not update all GPEs: %s\n", 709 AcpiFormatException(status)); 710 } 711 712 /* Allow sleep request after a while. */ 713 /* timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME); */ 714 715 error = 0; 716 717 out: 718 cputimer_intr_pmfixup(); 719 acpi_task_thread_schedule(); 720 return_VALUE (error); 721 } 722 723 static int 724 acpi_suspend(device_t dev) 725 { 726 device_t child, *devlist; 727 int error, i, numdevs, pstate; 728 729 /* First give child devices a chance to suspend. */ 730 error = bus_generic_suspend(dev); 731 if (error) 732 return (error); 733 734 /* 735 * Now, set them into the appropriate power state, usually D3. If the 736 * device has an _SxD method for the next sleep state, use that power 737 * state instead. 738 */ 739 device_get_children(dev, &devlist, &numdevs); 740 for (i = 0; i < numdevs; i++) { 741 /* If the device is not attached, we've powered it down elsewhere. */ 742 child = devlist[i]; 743 if (!device_is_attached(child)) 744 continue; 745 746 /* 747 * Default to D3 for all sleep states. The _SxD method is optional 748 * so set the powerstate even if it's absent. 749 */ 750 pstate = PCI_POWERSTATE_D3; 751 error = acpi_device_pwr_for_sleep(device_get_parent(child), 752 child, &pstate); 753 if ((error == 0 || error == ESRCH) && acpi_do_powerstate) 754 pci_set_powerstate(child, pstate); 755 } 756 kfree(devlist, M_TEMP); 757 error = 0; 758 759 return (error); 760 } 761 762 static int 763 acpi_resume(device_t dev) 764 { 765 ACPI_HANDLE handle; 766 int i, numdevs; 767 device_t child, *devlist; 768 769 /* 770 * Put all devices in D0 before resuming them. Call _S0D on each one 771 * since some systems expect this. 
772 */ 773 device_get_children(dev, &devlist, &numdevs); 774 for (i = 0; i < numdevs; i++) { 775 child = devlist[i]; 776 handle = acpi_get_handle(child); 777 if (handle) 778 AcpiEvaluateObject(handle, "_S0D", NULL, NULL); 779 if (device_is_attached(child) && acpi_do_powerstate) 780 pci_set_powerstate(child, PCI_POWERSTATE_D0); 781 } 782 kfree(devlist, M_TEMP); 783 784 return (bus_generic_resume(dev)); 785 } 786 787 static int 788 acpi_shutdown(device_t dev) 789 { 790 /* Allow children to shutdown first. */ 791 bus_generic_shutdown(dev); 792 793 /* 794 * Enable any GPEs that are able to power-on the system (i.e., RTC). 795 * Also, disable any that are not valid for this state (most). 796 */ 797 acpi_wake_prep_walk(ACPI_STATE_S5); 798 799 return (0); 800 } 801 802 /* 803 * Handle a new device being added 804 */ 805 static device_t 806 acpi_add_child(device_t bus, device_t parent, int order, const char *name, int unit) 807 { 808 struct acpi_device *ad; 809 device_t child; 810 811 if ((ad = kmalloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL) 812 return (NULL); 813 814 resource_list_init(&ad->ad_rl); 815 child = device_add_child_ordered(parent, order, name, unit); 816 if (child != NULL) 817 device_set_ivars(child, ad); 818 else 819 kfree(ad, M_ACPIDEV); 820 return (child); 821 } 822 823 static int 824 acpi_print_child(device_t bus, device_t child) 825 { 826 struct acpi_device *adev = device_get_ivars(child); 827 struct resource_list *rl = &adev->ad_rl; 828 int retval = 0; 829 830 retval += bus_print_child_header(bus, child); 831 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx"); 832 retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx"); 833 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); 834 retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%ld"); 835 if (device_get_flags(child)) 836 retval += kprintf(" flags %#x", device_get_flags(child)); 837 retval += bus_print_child_footer(bus, child); 838 839 return (retval); 840 } 841 842 /* 843 * If this device is an ACPI child but no one claimed it, attempt 844 * to power it off. We'll power it back up when a driver is added. 845 * 846 * XXX Disabled for now since many necessary devices (like fdc and 847 * ATA) don't claim the devices we created for them but still expect 848 * them to be powered up. 849 */ 850 static void 851 acpi_probe_nomatch(device_t bus, device_t child) 852 { 853 854 /* pci_set_powerstate(child, PCI_POWERSTATE_D3); */ 855 } 856 857 /* 858 * If a new driver has a chance to probe a child, first power it up. 859 * 860 * XXX Disabled for now (see acpi_probe_nomatch for details). 
861 */ 862 static void 863 acpi_driver_added(device_t dev, driver_t *driver) 864 { 865 device_t child, *devlist; 866 int i, numdevs; 867 868 DEVICE_IDENTIFY(driver, dev); 869 device_get_children(dev, &devlist, &numdevs); 870 for (i = 0; i < numdevs; i++) { 871 child = devlist[i]; 872 if (device_get_state(child) == DS_NOTPRESENT) { 873 /* pci_set_powerstate(child, PCI_POWERSTATE_D0); */ 874 if (device_probe_and_attach(child) != 0) 875 ; /* pci_set_powerstate(child, PCI_POWERSTATE_D3); */ 876 } 877 } 878 kfree(devlist, M_TEMP); 879 } 880 881 /* Location hint for devctl(8) */ 882 static int 883 acpi_child_location_str_method(device_t cbdev, device_t child, char *buf, 884 size_t buflen) 885 { 886 struct acpi_device *dinfo = device_get_ivars(child); 887 888 if (dinfo->ad_handle) 889 ksnprintf(buf, buflen, "handle=%s", acpi_name(dinfo->ad_handle)); 890 else 891 ksnprintf(buf, buflen, "unknown"); 892 return (0); 893 } 894 895 /* PnP information for devctl(8) */ 896 static int 897 acpi_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf, 898 size_t buflen) 899 { 900 ACPI_DEVICE_INFO *adinfo; 901 struct acpi_device *dinfo = device_get_ivars(child); 902 char *end; 903 904 if (ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo))) { 905 ksnprintf(buf, buflen, "unknown"); 906 } else { 907 ksnprintf(buf, buflen, "_HID=%s _UID=%lu", 908 (adinfo->Valid & ACPI_VALID_HID) ? 909 adinfo->HardwareId.String : "none", 910 (adinfo->Valid & ACPI_VALID_UID) ? 911 strtoul(adinfo->UniqueId.String, &end, 10) : 0); 912 if (adinfo) 913 AcpiOsFree(adinfo); 914 } 915 return (0); 916 } 917 918 /* 919 * Handle per-device ivars 920 */ 921 static int 922 acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) 923 { 924 struct acpi_device *ad; 925 926 if ((ad = device_get_ivars(child)) == NULL) { 927 device_printf(child, "device has no ivars\n"); 928 return (ENOENT); 929 } 930 931 /* ACPI and ISA compatibility ivars */ 932 switch(index) { 933 case ACPI_IVAR_HANDLE: 934 *(ACPI_HANDLE *)result = ad->ad_handle; 935 break; 936 case ACPI_IVAR_MAGIC: 937 *result = ad->ad_magic; 938 break; 939 case ACPI_IVAR_PRIVATE: 940 *(void **)result = ad->ad_private; 941 break; 942 case ACPI_IVAR_FLAGS: 943 *(int *)result = ad->ad_flags; 944 break; 945 case ISA_IVAR_VENDORID: 946 case ISA_IVAR_SERIAL: 947 case ISA_IVAR_COMPATID: 948 *(int *)result = -1; 949 break; 950 case ISA_IVAR_LOGICALID: 951 *(int *)result = acpi_isa_get_logicalid(child); 952 break; 953 default: 954 return (ENOENT); 955 } 956 957 return (0); 958 } 959 960 static int 961 acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value) 962 { 963 struct acpi_device *ad; 964 965 if ((ad = device_get_ivars(child)) == NULL) { 966 device_printf(child, "device has no ivars\n"); 967 return (ENOENT); 968 } 969 970 switch(index) { 971 case ACPI_IVAR_HANDLE: 972 ad->ad_handle = (ACPI_HANDLE)value; 973 break; 974 case ACPI_IVAR_MAGIC: 975 ad->ad_magic = value; 976 break; 977 case ACPI_IVAR_PRIVATE: 978 ad->ad_private = (void *)value; 979 break; 980 case ACPI_IVAR_FLAGS: 981 ad->ad_flags = (int)value; 982 break; 983 default: 984 panic("bad ivar write request (%d)", index); 985 return (ENOENT); 986 } 987 988 return (0); 989 } 990 991 /* 992 * Handle child resource allocation/removal 993 */ 994 static struct resource_list * 995 acpi_get_rlist(device_t dev, device_t child) 996 { 997 struct acpi_device *ad; 998 999 ad = device_get_ivars(child); 1000 return (&ad->ad_rl); 1001 } 1002 1003 /* 1004 * Pre-allocate/manage all memory and IO 
resources. Since rman can't handle 1005 * duplicates, we merge any in the sysresource attach routine. 1006 */ 1007 static int 1008 acpi_sysres_alloc(device_t dev) 1009 { 1010 struct resource *res; 1011 struct resource_list *rl; 1012 struct resource_list_entry *rle; 1013 struct rman *rm; 1014 char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL }; 1015 device_t *children; 1016 int child_count, i; 1017 /* 1018 * Probe/attach any sysresource devices. This would be unnecessary if we 1019 * had multi-pass probe/attach. 1020 */ 1021 if (device_get_children(dev, &children, &child_count) != 0) 1022 return (ENXIO); 1023 for (i = 0; i < child_count; i++) { 1024 if (ACPI_ID_PROBE(dev, children[i], sysres_ids) != NULL) 1025 device_probe_and_attach(children[i]); 1026 } 1027 kfree(children, M_TEMP); 1028 1029 rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev); 1030 if(!rl) 1031 return 0; 1032 SLIST_FOREACH(rle, rl, link) { 1033 if (rle->res != NULL) { 1034 device_printf(dev, "duplicate resource for %lx\n", rle->start); 1035 continue; 1036 } 1037 1038 /* Only memory and IO resources are valid here. */ 1039 switch (rle->type) { 1040 case SYS_RES_IOPORT: 1041 rm = &acpi_rman_io; 1042 break; 1043 case SYS_RES_MEMORY: 1044 rm = &acpi_rman_mem; 1045 break; 1046 default: 1047 continue; 1048 } 1049 1050 /* Pre-allocate resource and add to our rman pool. */ 1051 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, rle->type, 1052 &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, 1053 0, -1); 1054 if (res != NULL) { 1055 rman_manage_region(rm, rman_get_start(res), rman_get_end(res)); 1056 rle->res = res; 1057 } else 1058 device_printf(dev, "reservation of %lx, %lx (%d) failed\n", 1059 rle->start, rle->count, rle->type); 1060 } 1061 return (0); 1062 } 1063 1064 static struct resource * 1065 acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, 1066 u_long start, u_long end, u_long count, u_int flags, int cpuid) 1067 { 1068 ACPI_RESOURCE ares; 1069 struct acpi_device *ad = device_get_ivars(child); 1070 struct resource_list *rl = &ad->ad_rl; 1071 struct resource_list_entry *rle; 1072 struct resource *res; 1073 struct rman *rm; 1074 1075 res = NULL; 1076 1077 /* We only handle memory and IO resources through rman. */ 1078 switch (type) { 1079 case SYS_RES_IOPORT: 1080 rm = &acpi_rman_io; 1081 break; 1082 case SYS_RES_MEMORY: 1083 rm = &acpi_rman_mem; 1084 break; 1085 default: 1086 rm = NULL; 1087 } 1088 1089 ACPI_SERIAL_BEGIN(acpi); 1090 1091 /* 1092 * If this is an allocation of the "default" range for a given RID, and 1093 * we know what the resources for this device are (i.e., they're on the 1094 * child's resource list), use those start/end values. 1095 */ 1096 if (bus == device_get_parent(child) && start == 0UL && end == ~0UL) { 1097 rle = resource_list_find(rl, type, *rid); 1098 if (rle == NULL) 1099 goto out; 1100 start = rle->start; 1101 end = rle->end; 1102 count = rle->count; 1103 cpuid = rle->cpuid; 1104 } 1105 1106 /* 1107 * If this is an allocation of a specific range, see if we can satisfy 1108 * the request from our system resource regions. If we can't, pass the 1109 * request up to the parent. 
1110 */ 1111 if (start + count - 1 == end && rm != NULL) 1112 res = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, 1113 child); 1114 if (res == NULL) { 1115 res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, 1116 start, end, count, flags, cpuid); 1117 } else { 1118 rman_set_rid(res, *rid); 1119 1120 /* If requested, activate the resource using the parent's method. */ 1121 if (flags & RF_ACTIVE) 1122 if (bus_activate_resource(child, type, *rid, res) != 0) { 1123 rman_release_resource(res); 1124 res = NULL; 1125 goto out; 1126 } 1127 } 1128 1129 if (res != NULL && device_get_parent(child) == bus) 1130 switch (type) { 1131 case SYS_RES_IRQ: 1132 /* 1133 * Since bus_config_intr() takes immediate effect, we cannot 1134 * configure the interrupt associated with a device when we 1135 * parse the resources but have to defer it until a driver 1136 * actually allocates the interrupt via bus_alloc_resource(). 1137 * 1138 * NB: Lookup failure is fine, since the device may add its 1139 * own interrupt resources, e.g. MSI or MSI-X. 1140 */ 1141 if (ACPI_SUCCESS( 1142 acpi_lookup_irq_resource(child, *rid, res, &ares))) { 1143 acpi_config_intr(child, &ares); 1144 } else { 1145 if (bootverbose) 1146 kprintf("irq resource not found\n"); 1147 } 1148 break; 1149 } 1150 1151 out: 1152 ACPI_SERIAL_END(acpi); 1153 return (res); 1154 } 1155 1156 static int 1157 acpi_release_resource(device_t bus, device_t child, int type, int rid, 1158 struct resource *r) 1159 { 1160 struct rman *rm; 1161 int ret; 1162 1163 /* We only handle memory and IO resources through rman. */ 1164 switch (type) { 1165 case SYS_RES_IOPORT: 1166 rm = &acpi_rman_io; 1167 break; 1168 case SYS_RES_MEMORY: 1169 rm = &acpi_rman_mem; 1170 break; 1171 default: 1172 rm = NULL; 1173 } 1174 1175 ACPI_SERIAL_BEGIN(acpi); 1176 1177 /* 1178 * If this resource belongs to one of our internal managers, 1179 * deactivate it and release it to the local pool. If it doesn't, 1180 * pass this request up to the parent. 1181 */ 1182 if (rm != NULL && rman_is_region_manager(r, rm)) { 1183 if (rman_get_flags(r) & RF_ACTIVE) { 1184 ret = bus_deactivate_resource(child, type, rid, r); 1185 if (ret != 0) 1186 goto out; 1187 } 1188 ret = rman_release_resource(r); 1189 } else 1190 ret = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, r); 1191 1192 out: 1193 ACPI_SERIAL_END(acpi); 1194 return (ret); 1195 } 1196 1197 static void 1198 acpi_delete_resource(device_t bus, device_t child, int type, int rid) 1199 { 1200 struct resource_list *rl; 1201 1202 rl = acpi_get_rlist(bus, child); 1203 resource_list_delete(rl, type, rid); 1204 } 1205 1206 /* Allocate an IO port or memory resource, given its GAS. */ 1207 int 1208 acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, 1209 struct resource **res, u_int flags) 1210 { 1211 int error, res_type; 1212 1213 error = ENOMEM; 1214 if (type == NULL || rid == NULL || gas == NULL || res == NULL) 1215 return (EINVAL); 1216 1217 /* We only support memory and IO spaces. */ 1218 switch (gas->SpaceId) { 1219 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 1220 res_type = SYS_RES_MEMORY; 1221 break; 1222 case ACPI_ADR_SPACE_SYSTEM_IO: 1223 res_type = SYS_RES_IOPORT; 1224 break; 1225 default: 1226 return (EOPNOTSUPP); 1227 } 1228 1229 /* 1230 * If the register width is less than 8, assume the BIOS author means 1231 * it is a bit field and just allocate a byte. 
1232 */ 1233 if (gas->BitWidth && gas->BitWidth < 8) 1234 gas->BitWidth = 8; 1235 1236 /* Validate the address after we're sure we support the space. */ 1237 if (gas->Address == 0 || gas->BitWidth == 0) 1238 return (EINVAL); 1239 1240 bus_set_resource(dev, res_type, *rid, gas->Address, 1241 gas->BitWidth / 8, -1); 1242 *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags); 1243 if (*res != NULL) { 1244 *type = res_type; 1245 error = 0; 1246 } else 1247 bus_delete_resource(dev, res_type, *rid); 1248 1249 return (error); 1250 } 1251 1252 ACPI_STATUS 1253 acpi_eval_osc(device_t dev, ACPI_HANDLE handle, const char *uuidstr, 1254 int revision, uint32_t *buf, int count) 1255 { 1256 ACPI_BUFFER retbuf = { ACPI_ALLOCATE_BUFFER, NULL }; 1257 ACPI_OBJECT_LIST arglist; 1258 ACPI_OBJECT arg[4]; 1259 ACPI_OBJECT *retobj; 1260 ACPI_STATUS status; 1261 struct uuid uuid; 1262 uint32_t error; 1263 uint8_t oscuuid[16]; 1264 int i; 1265 1266 if (parse_uuid(uuidstr, &uuid) != 0) 1267 return (AE_ERROR); 1268 le_uuid_enc(oscuuid, &uuid); 1269 1270 arglist.Pointer = arg; 1271 arglist.Count = 4; 1272 arg[0].Type = ACPI_TYPE_BUFFER; 1273 arg[0].Buffer.Length = sizeof(oscuuid); 1274 arg[0].Buffer.Pointer = oscuuid; /* UUID */ 1275 arg[1].Type = ACPI_TYPE_INTEGER; 1276 arg[1].Integer.Value = revision; /* revision */ 1277 arg[2].Type = ACPI_TYPE_INTEGER; 1278 arg[2].Integer.Value = count; /* # of cap integers */ 1279 arg[3].Type = ACPI_TYPE_BUFFER; 1280 arg[3].Buffer.Length = count * sizeof(uint32_t); /* capabilities buffer */ 1281 arg[3].Buffer.Pointer = (uint8_t *)buf; 1282 1283 status = AcpiEvaluateObject(handle, "_OSC", &arglist, &retbuf); 1284 if (ACPI_FAILURE(status)) 1285 goto done; 1286 retobj = retbuf.Pointer; 1287 error = ((uint32_t *)retobj->Buffer.Pointer)[0] & ACPI_OSCERR_MASK; 1288 if (error == 0) 1289 goto done; 1290 status = AE_ERROR; 1291 if (error & ACPI_OSCERR_OSCFAIL) 1292 device_printf(dev, "_OSC unable to process request\n"); 1293 if (error & ACPI_OSCERR_UUID) 1294 device_printf(dev, "_OSC unrecognized UUID (%s)\n", uuidstr); 1295 if (error & ACPI_OSCERR_REVISION) 1296 device_printf(dev, "_OSC unrecognized revision ID (%d)\n", revision); 1297 if (error & ACPI_OSCERR_CAPSMASKED) { 1298 if ((buf[0] & ACPI_OSC_QUERY_SUPPORT) == 0) { 1299 for (i = 1; i < count; i++) { 1300 device_printf(dev, 1301 "_OSC capabilities have been masked: buf[%d]:%#x\n", 1302 i, buf[i] & ~((uint32_t *)retobj->Buffer.Pointer)[i]); 1303 } 1304 status = AE_SUPPORT; 1305 } else { 1306 status = AE_OK; 1307 } 1308 } 1309 1310 done: 1311 if (retbuf.Pointer != NULL) 1312 AcpiOsFree(retbuf.Pointer); 1313 return (status); 1314 } 1315 1316 /* Probe _HID and _CID for compatible ISA PNP ids. */ 1317 static uint32_t 1318 acpi_isa_get_logicalid(device_t dev) 1319 { 1320 ACPI_DEVICE_INFO *devinfo; 1321 ACPI_HANDLE h; 1322 uint32_t pnpid; 1323 1324 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1325 1326 devinfo = NULL; 1327 pnpid = 0; 1328 1329 /* Fetch and validate the HID. 
*/ 1330 if ((h = acpi_get_handle(dev)) == NULL || 1331 ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) 1332 goto out; 1333 1334 if ((devinfo->Valid & ACPI_VALID_HID) != 0) 1335 pnpid = PNP_EISAID(devinfo->HardwareId.String); 1336 1337 out: 1338 if (devinfo) 1339 AcpiOsFree(devinfo); 1340 return_VALUE (pnpid); 1341 } 1342 1343 static int 1344 acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count) 1345 { 1346 ACPI_DEVICE_INFO *devinfo; 1347 ACPI_HANDLE h; 1348 uint32_t *pnpid; 1349 int valid, i; 1350 1351 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1352 1353 devinfo = NULL; 1354 pnpid = cids; 1355 valid = 0; 1356 1357 /* Fetch and validate the CID */ 1358 if ((h = acpi_get_handle(dev)) == NULL || 1359 ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)) || 1360 (devinfo->Valid & ACPI_VALID_CID) == 0) 1361 goto out; 1362 1363 if (devinfo->CompatibleIdList.Count < count) 1364 count = devinfo->CompatibleIdList.Count; 1365 for (i = 0; i < count; i++) { 1366 if (strncmp(devinfo->CompatibleIdList.Ids[i].String, "PNP", 3) != 0) 1367 continue; 1368 *pnpid++ = PNP_EISAID(devinfo->CompatibleIdList.Ids[i].String); 1369 valid++; 1370 } 1371 1372 out: 1373 if (devinfo) 1374 AcpiOsFree(devinfo); 1375 return_VALUE (valid); 1376 } 1377 1378 static char * 1379 acpi_device_id_probe(device_t bus, device_t dev, char **ids) 1380 { 1381 ACPI_HANDLE h; 1382 int i; 1383 1384 h = acpi_get_handle(dev); 1385 if (ids == NULL || h == NULL || acpi_get_type(dev) != ACPI_TYPE_DEVICE) 1386 return (NULL); 1387 1388 /* Try to match one of the array of IDs with a HID or CID. */ 1389 for (i = 0; ids[i] != NULL; i++) { 1390 if (acpi_MatchHid(h, ids[i])) 1391 return (ids[i]); 1392 } 1393 return (NULL); 1394 } 1395 1396 static ACPI_STATUS 1397 acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, 1398 ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret) 1399 { 1400 ACPI_HANDLE h; 1401 1402 if (dev == NULL) 1403 h = ACPI_ROOT_OBJECT; 1404 else if ((h = acpi_get_handle(dev)) == NULL) 1405 return (AE_BAD_PARAMETER); 1406 return (AcpiEvaluateObject(h, pathname, parameters, ret)); 1407 } 1408 1409 static int 1410 acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate) 1411 { 1412 struct acpi_softc *sc; 1413 ACPI_HANDLE handle; 1414 ACPI_STATUS status; 1415 char sxd[8]; 1416 int error; 1417 1418 sc = device_get_softc(bus); 1419 handle = acpi_get_handle(dev); 1420 1421 /* 1422 * XXX If we find these devices, don't try to power them down. 1423 * The serial and IRDA ports on my T23 hang the system when 1424 * set to D3 and it appears that such legacy devices may 1425 * need special handling in their drivers. 1426 */ 1427 if (handle == NULL || 1428 acpi_MatchHid(handle, "PNP0500") || 1429 acpi_MatchHid(handle, "PNP0501") || 1430 acpi_MatchHid(handle, "PNP0502") || 1431 acpi_MatchHid(handle, "PNP0510") || 1432 acpi_MatchHid(handle, "PNP0511")) 1433 return (ENXIO); 1434 1435 /* 1436 * Override next state with the value from _SxD, if present. If no 1437 * dstate argument was provided, don't fetch the return value. 1438 */ 1439 ksnprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate); 1440 if (dstate) 1441 status = acpi_GetInteger(handle, sxd, dstate); 1442 else 1443 status = AcpiEvaluateObject(handle, sxd, NULL, NULL); 1444 1445 switch (status) { 1446 case AE_OK: 1447 error = 0; 1448 break; 1449 case AE_NOT_FOUND: 1450 error = ESRCH; 1451 break; 1452 default: 1453 error = ENXIO; 1454 break; 1455 } 1456 1457 return (error); 1458 } 1459 1460 /* Callback arg for our implementation of walking the namespace. 
*/ 1461 struct acpi_device_scan_ctx { 1462 acpi_scan_cb_t user_fn; 1463 void *arg; 1464 ACPI_HANDLE parent; 1465 }; 1466 1467 static ACPI_STATUS 1468 acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval) 1469 { 1470 struct acpi_device_scan_ctx *ctx; 1471 device_t dev, old_dev; 1472 ACPI_STATUS status; 1473 ACPI_OBJECT_TYPE type; 1474 1475 /* 1476 * Skip this device if we think we'll have trouble with it or it is 1477 * the parent where the scan began. 1478 */ 1479 ctx = (struct acpi_device_scan_ctx *)arg; 1480 if (acpi_avoid(h) || h == ctx->parent) 1481 return (AE_OK); 1482 1483 /* If this is not a valid device type (e.g., a method), skip it. */ 1484 if (ACPI_FAILURE(AcpiGetType(h, &type))) 1485 return (AE_OK); 1486 if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR && 1487 type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER) 1488 return (AE_OK); 1489 1490 /* 1491 * Call the user function with the current device. If it is unchanged 1492 * afterwards, return. Otherwise, we update the handle to the new dev. 1493 */ 1494 old_dev = acpi_get_device(h); 1495 dev = old_dev; 1496 status = ctx->user_fn(h, &dev, level, ctx->arg); 1497 if (ACPI_FAILURE(status) || old_dev == dev) 1498 return (status); 1499 1500 /* Remove the old child and its connection to the handle. */ 1501 if (old_dev != NULL) { 1502 device_delete_child(device_get_parent(old_dev), old_dev); 1503 AcpiDetachData(h, acpi_fake_objhandler); 1504 } 1505 1506 /* Recreate the handle association if the user created a device. */ 1507 if (dev != NULL) 1508 AcpiAttachData(h, acpi_fake_objhandler, dev); 1509 1510 return (AE_OK); 1511 } 1512 1513 static ACPI_STATUS 1514 acpi_device_scan_children(device_t bus, device_t dev, int max_depth, 1515 acpi_scan_cb_t user_fn, void *arg) 1516 { 1517 ACPI_HANDLE h; 1518 struct acpi_device_scan_ctx ctx; 1519 1520 if (acpi_disabled("children")) 1521 return (AE_OK); 1522 1523 if (dev == NULL) 1524 h = ACPI_ROOT_OBJECT; 1525 else if ((h = acpi_get_handle(dev)) == NULL) 1526 return (AE_BAD_PARAMETER); 1527 ctx.user_fn = user_fn; 1528 ctx.arg = arg; 1529 ctx.parent = h; 1530 return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth, 1531 acpi_device_scan_cb, NULL, &ctx, NULL)); 1532 } 1533 1534 /* 1535 * Even though ACPI devices are not PCI, we use the PCI approach for setting 1536 * device power states since it's close enough to ACPI. 1537 */ 1538 static int 1539 acpi_set_powerstate_method(device_t bus, device_t child, int state) 1540 { 1541 ACPI_HANDLE h; 1542 ACPI_STATUS status; 1543 int error; 1544 1545 error = 0; 1546 h = acpi_get_handle(child); 1547 if (state < ACPI_STATE_D0 || state > ACPI_STATE_D3) 1548 return (EINVAL); 1549 if (h == NULL) 1550 return (0); 1551 1552 /* Ignore errors if the power methods aren't present. */ 1553 status = acpi_pwr_switch_consumer(h, state); 1554 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND 1555 && status != AE_BAD_PARAMETER) 1556 device_printf(bus, "failed to set ACPI power state D%d on %s: %s\n", 1557 state, acpi_name(h), AcpiFormatException(status)); 1558 1559 return (error); 1560 } 1561 1562 static int 1563 acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids) 1564 { 1565 int result, cid_count, i; 1566 uint32_t lid, cids[8]; 1567 1568 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1569 1570 /* 1571 * ISA-style drivers attached to ACPI may persist and 1572 * probe manually if we return ENOENT. We never want 1573 * that to happen, so don't ever return it. 
1574 */ 1575 result = ENXIO; 1576 1577 /* Scan the supplied IDs for a match */ 1578 lid = acpi_isa_get_logicalid(child); 1579 cid_count = acpi_isa_get_compatid(child, cids, 8); 1580 while (ids && ids->ip_id) { 1581 if (lid == ids->ip_id) { 1582 result = 0; 1583 goto out; 1584 } 1585 for (i = 0; i < cid_count; i++) { 1586 if (cids[i] == ids->ip_id) { 1587 result = 0; 1588 goto out; 1589 } 1590 } 1591 ids++; 1592 } 1593 1594 out: 1595 if (result == 0 && ids->ip_desc) 1596 device_set_desc(child, ids->ip_desc); 1597 1598 return_VALUE (result); 1599 } 1600 1601 /* 1602 * Look for a MCFG table. If it is present, use the settings for 1603 * domain (segment) 0 to setup PCI config space access via the memory 1604 * map. 1605 */ 1606 static void 1607 acpi_enable_pcie(void) 1608 { 1609 ACPI_TABLE_HEADER *hdr; 1610 ACPI_MCFG_ALLOCATION *alloc, *end; 1611 ACPI_STATUS status; 1612 1613 status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr); 1614 if (ACPI_FAILURE(status)) 1615 return; 1616 1617 end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length); 1618 alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1); 1619 while (alloc < end) { 1620 if (alloc->PciSegment == 0) { 1621 pcie_cfgregopen(alloc->Address, alloc->StartBusNumber, 1622 alloc->EndBusNumber); 1623 return; 1624 } 1625 alloc++; 1626 } 1627 } 1628 1629 /* 1630 * Scan all of the ACPI namespace and attach child devices. 1631 * 1632 * We should only expect to find devices in the \_PR, \_TZ, \_SI, and 1633 * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec. 1634 * However, in violation of the spec, some systems place their PCI link 1635 * devices in \, so we have to walk the whole namespace. We check the 1636 * type of namespace nodes, so this should be ok. 1637 */ 1638 static void 1639 acpi_probe_children(device_t bus) 1640 { 1641 1642 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1643 1644 /* 1645 * Scan the namespace and insert placeholders for all the devices that 1646 * we find. We also probe/attach any early devices. 1647 * 1648 * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because 1649 * we want to create nodes for all devices, not just those that are 1650 * currently present. (This assumes that we don't want to create/remove 1651 * devices as they appear, which might be smarter.) 1652 */ 1653 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); 1654 AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, 1655 acpi_probe_child, NULL, bus, NULL); 1656 1657 /* Pre-allocate resources for our rman from any sysresource devices. */ 1658 acpi_sysres_alloc(bus); 1659 /* Create any static children by calling device identify methods. */ 1660 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); 1661 bus_generic_probe(bus); 1662 1663 /* Probe/attach all children, created staticly and from the namespace. */ 1664 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "first bus_generic_attach\n")); 1665 bus_generic_attach(bus); 1666 1667 /* 1668 * Some of these children may have attached others as part of their attach 1669 * process (eg. the root PCI bus driver), so rescan. 1670 */ 1671 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "second bus_generic_attach\n")); 1672 bus_generic_attach(bus); 1673 1674 /* Attach wake sysctls. */ 1675 acpi_wake_sysctl_walk(bus); 1676 1677 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n")); 1678 return_VOID; 1679 } 1680 1681 /* 1682 * Determine the probe order for a given device. 
1683 */ 1684 static void 1685 acpi_probe_order(ACPI_HANDLE handle, int *order) 1686 { 1687 ACPI_OBJECT_TYPE type; 1688 1689 /* 1690 * 1. I/O port and memory system resource holders 1691 * 2. Embedded controllers (to handle early accesses) 1692 * 3. PCI Link Devices 1693 * 100000. CPUs 1694 */ 1695 AcpiGetType(handle, &type); 1696 if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02")) 1697 *order = 1; 1698 else if (acpi_MatchHid(handle, "PNP0C09")) 1699 *order = 2; 1700 else if (acpi_MatchHid(handle, "PNP0C0F")) 1701 *order = 3; 1702 else if (type == ACPI_TYPE_PROCESSOR) 1703 *order = 100000; 1704 } 1705 1706 /* 1707 * Evaluate a child device and determine whether we might attach a device to 1708 * it. 1709 */ 1710 static ACPI_STATUS 1711 acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) 1712 { 1713 struct acpi_prw_data prw; 1714 ACPI_OBJECT_TYPE type; 1715 ACPI_HANDLE h; 1716 device_t bus, child; 1717 int order; 1718 char *handle_str; 1719 1720 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1721 1722 if (acpi_disabled("children")) 1723 return_ACPI_STATUS (AE_OK); 1724 1725 /* Skip this device if we think we'll have trouble with it. */ 1726 if (acpi_avoid(handle)) 1727 return_ACPI_STATUS (AE_OK); 1728 1729 bus = (device_t)context; 1730 if (ACPI_SUCCESS(AcpiGetType(handle, &type))) { 1731 handle_str = acpi_name(handle); 1732 switch (type) { 1733 case ACPI_TYPE_DEVICE: 1734 /* 1735 * Since we scan from \, be sure to skip system scope objects. 1736 * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around 1737 * BIOS bugs. For example, \_SB_ is to allow \_SB_._INI to be run 1738 * during the intialization and \_TZ_ is to support Notify() on it. 1739 */ 1740 if (strcmp(handle_str, "\\_SB_") == 0 || 1741 strcmp(handle_str, "\\_TZ_") == 0) 1742 break; 1743 1744 if (acpi_parse_prw(handle, &prw) == 0) 1745 AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit); 1746 1747 /* FALLTHROUGH */ 1748 case ACPI_TYPE_PROCESSOR: 1749 case ACPI_TYPE_THERMAL: 1750 case ACPI_TYPE_POWER: 1751 /* 1752 * Create a placeholder device for this node. Sort the 1753 * placeholder so that the probe/attach passes will run 1754 * breadth-first. Orders less than ACPI_DEV_BASE_ORDER 1755 * are reserved for special objects (i.e., system 1756 * resources). CPU devices have a very high order to 1757 * ensure they are probed after other devices. 1758 */ 1759 ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str)); 1760 order = level * 10 + 100; 1761 acpi_probe_order(handle, &order); 1762 child = BUS_ADD_CHILD(bus, bus, order, NULL, -1); 1763 if (child == NULL) 1764 break; 1765 1766 /* Associate the handle with the device_t and vice versa. */ 1767 acpi_set_handle(child, handle); 1768 AcpiAttachData(handle, acpi_fake_objhandler, child); 1769 1770 /* 1771 * Check that the device is present. If it's not present, 1772 * leave it disabled (so that we have a device_t attached to 1773 * the handle, but we don't probe it). 1774 * 1775 * XXX PCI link devices sometimes report "present" but not 1776 * "functional" (i.e. if disabled). Go ahead and probe them 1777 * anyway since we may enable them later. 1778 */ 1779 if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) { 1780 /* Never disable PCI link devices. */ 1781 if (acpi_MatchHid(handle, "PNP0C0F")) 1782 break; 1783 /* 1784 * Docking stations should remain enabled since the system 1785 * may be undocked at boot. 
1786 */ 1787 if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h))) 1788 break; 1789 1790 device_disable(child); 1791 break; 1792 } 1793 1794 /* 1795 * Get the device's resource settings and attach them. 1796 * Note that if the device has _PRS but no _CRS, we need 1797 * to decide when it's appropriate to try to configure the 1798 * device. Ignore the return value here; it's OK for the 1799 * device not to have any resources. 1800 */ 1801 acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL); 1802 break; 1803 } 1804 } 1805 1806 return_ACPI_STATUS (AE_OK); 1807 } 1808 1809 /* 1810 * AcpiAttachData() requires an object handler but never uses it. This is a 1811 * placeholder object handler so we can store a device_t in an ACPI_HANDLE. 1812 */ 1813 void 1814 acpi_fake_objhandler(ACPI_HANDLE h, void *data) 1815 { 1816 } 1817 1818 static void 1819 acpi_shutdown_final(void *arg, int howto) 1820 { 1821 struct acpi_softc *sc; 1822 ACPI_STATUS status; 1823 1824 /* 1825 * XXX Shutdown code should only run on the BSP (cpuid 0). 1826 * Some chipsets do not power off the system correctly if called from 1827 * an AP. 1828 */ 1829 sc = arg; 1830 if ((howto & RB_POWEROFF) != 0) { 1831 status = AcpiEnterSleepStatePrep(ACPI_STATE_S5); 1832 if (ACPI_FAILURE(status)) { 1833 device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", 1834 AcpiFormatException(status)); 1835 return; 1836 } 1837 device_printf(sc->acpi_dev, "Powering system off\n"); 1838 ACPI_DISABLE_IRQS(); 1839 status = AcpiEnterSleepState(ACPI_STATE_S5); 1840 if (ACPI_FAILURE(status)) { 1841 device_printf(sc->acpi_dev, "power-off failed - %s\n", 1842 AcpiFormatException(status)); 1843 } else { 1844 DELAY(1000000); 1845 device_printf(sc->acpi_dev, "power-off failed - timeout\n"); 1846 } 1847 } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) { 1848 /* Reboot using the reset register. */ 1849 status = AcpiReset(); 1850 if (ACPI_FAILURE(status)) { 1851 if (status != AE_NOT_EXIST) 1852 device_printf(sc->acpi_dev, "reset failed - %s\n", 1853 AcpiFormatException(status)); 1854 } else { 1855 DELAY(1000000); 1856 device_printf(sc->acpi_dev, "reset failed - timeout\n"); 1857 } 1858 } else if (sc->acpi_do_disable && panicstr == NULL) { 1859 /* 1860 * Only disable ACPI if the user requested. On some systems, writing 1861 * the disable value to SMI_CMD hangs the system. 1862 */ 1863 device_printf(sc->acpi_dev, "Shutting down\n"); 1864 AcpiTerminate(); 1865 } 1866 } 1867 1868 static void 1869 acpi_enable_fixed_events(struct acpi_softc *sc) 1870 { 1871 static int first_time = 1; 1872 1873 /* Enable and clear fixed events and install handlers. */ 1874 if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) { 1875 AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); 1876 AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, 1877 acpi_event_power_button_sleep, sc); 1878 if (first_time) 1879 device_printf(sc->acpi_dev, "Power Button (fixed)\n"); 1880 } 1881 if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) { 1882 AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON); 1883 AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, 1884 acpi_event_sleep_button_sleep, sc); 1885 if (first_time) 1886 device_printf(sc->acpi_dev, "Sleep Button (fixed)\n"); 1887 } 1888 1889 first_time = 0; 1890 } 1891 1892 /* 1893 * Returns true if the device is actually present and should 1894 * be attached to. This requires the present, enabled, UI-visible 1895 * and diagnostics-passed bits to be set. 
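 *
 * A device with no _STA method at all is treated as present. A minimal
 * usage sketch (for illustration only):
 *
 *	if (!acpi_DeviceIsPresent(child))
 *		device_disable(child);
 *
 * which is essentially what acpi_probe_child() does above.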
1896 */
1897 BOOLEAN
1898 acpi_DeviceIsPresent(device_t dev)
1899 {
1900 ACPI_DEVICE_INFO *devinfo;
1901 ACPI_HANDLE h;
1902 int ret;
1903
1904 ret = FALSE;
1905 if ((h = acpi_get_handle(dev)) == NULL ||
1906 ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
1907 return (FALSE);
1908
1909 /* If no _STA method, must be present */
1910 if ((devinfo->Valid & ACPI_VALID_STA) == 0)
1911 ret = TRUE;
1912
1913 /* Return true for 'present' and 'functioning' */
1914 if (ACPI_DEVICE_PRESENT(devinfo->CurrentStatus))
1915 ret = TRUE;
1916
1917 AcpiOsFree(devinfo);
1918 return (ret);
1919 }
1920
1921 /*
1922 * Returns true if the battery is actually present and inserted.
1923 */
1924 BOOLEAN
1925 acpi_BatteryIsPresent(device_t dev)
1926 {
1927 ACPI_DEVICE_INFO *devinfo;
1928 ACPI_HANDLE h;
1929 int ret;
1930
1931 ret = FALSE;
1932 if ((h = acpi_get_handle(dev)) == NULL ||
1933 ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
1934 return (FALSE);
1935
1936 /* If no _STA method, must be present */
1937 if ((devinfo->Valid & ACPI_VALID_STA) == 0)
1938 ret = TRUE;
1939
1940 /* Return true for 'present', 'battery present', and 'functioning' */
1941 if (ACPI_BATTERY_PRESENT(devinfo->CurrentStatus))
1942 ret = TRUE;
1943
1944 AcpiOsFree(devinfo);
1945 return (ret);
1946 }
1947
1948 /*
1949 * Match a HID string against a handle
1950 */
1951 BOOLEAN
1952 acpi_MatchHid(ACPI_HANDLE h, const char *hid)
1953 {
1954 ACPI_DEVICE_INFO *devinfo;
1955 int ret, i;
1956
1957 ret = FALSE;
1958 if (hid == NULL || h == NULL ||
1959 ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
1960 return (ret);
1961
1962 if ((devinfo->Valid & ACPI_VALID_HID) != 0 &&
1963 strcmp(hid, devinfo->HardwareId.String) == 0)
1964 ret = TRUE;
1965 else if ((devinfo->Valid & ACPI_VALID_CID) != 0) {
1966 for (i = 0; i < devinfo->CompatibleIdList.Count; i++) {
1967 if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) {
1968 ret = TRUE;
1969 break;
1970 }
1971 }
1972 }
1973
1974 AcpiOsFree(devinfo);
1975 return (ret);
1976 }
1977
1978 /*
1979 * Match a UID string against a handle
1980 */
1981 BOOLEAN
1982 acpi_MatchUid(ACPI_HANDLE h, const char *uid)
1983 {
1984 ACPI_DEVICE_INFO *devinfo;
1985 int ret;
1986
1987 ret = FALSE;
1988 if (uid == NULL || h == NULL ||
1989 ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
1990 return (ret);
1991
1992 if ((devinfo->Valid & ACPI_VALID_UID) != 0 &&
1993 strcmp(uid, devinfo->UniqueId.String) == 0)
1994 ret = TRUE;
1995
1996 AcpiOsFree(devinfo);
1997 return (ret);
1998 }
1999
2000 /*
2001 * Return the handle of a named object within our scope, i.e., that of (parent)
2002 * or one of its parents.
2003 */
2004 ACPI_STATUS
2005 acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result)
2006 {
2007 ACPI_HANDLE r;
2008 ACPI_STATUS status;
2009
2010 /* Walk back up the tree to the root */
2011 for (;;) {
2012 status = AcpiGetHandle(parent, path, &r);
2013 if (ACPI_SUCCESS(status)) {
2014 *result = r;
2015 return (AE_OK);
2016 }
2017 /* XXX Return error here? */
2018 if (status != AE_NOT_FOUND)
2019 return (AE_OK);
2020 if (ACPI_FAILURE(AcpiGetParent(parent, &r)))
2021 return (AE_NOT_FOUND);
2022 parent = r;
2023 }
2024 }
2025
2026 /*
2027 * Allocate a buffer with a preset data size.
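 * The header and its data area come from a single kmalloc() allocation, so
 * the whole thing can be released with one kfree(buf, M_ACPIDEV). An
 * illustrative (hypothetical) caller:
 *
 *	ACPI_BUFFER *buf;
 *
 *	if ((buf = acpi_AllocBuffer(128)) != NULL) {
 *		... fill/consume buf->Pointer, buf->Length ...
 *		kfree(buf, M_ACPIDEV);
 *	}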
2028 */
2029 ACPI_BUFFER *
2030 acpi_AllocBuffer(int size)
2031 {
2032 ACPI_BUFFER *buf;
2033
2034 if ((buf = kmalloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL)
2035 return (NULL);
2036 buf->Length = size;
2037 buf->Pointer = (void *)(buf + 1);
2038 return (buf);
2039 }
2040
2041 ACPI_STATUS
2042 acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number)
2043 {
2044 ACPI_OBJECT arg1;
2045 ACPI_OBJECT_LIST args;
2046
2047 arg1.Type = ACPI_TYPE_INTEGER;
2048 arg1.Integer.Value = number;
2049 args.Count = 1;
2050 args.Pointer = &arg1;
2051
2052 return (AcpiEvaluateObject(handle, path, &args, NULL));
2053 }
2054
2055 /*
2056 * Evaluate a path that should return an integer.
2057 */
2058 ACPI_STATUS
2059 acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number)
2060 {
2061 ACPI_STATUS status;
2062 ACPI_BUFFER buf;
2063 ACPI_OBJECT param;
2064
2065 if (handle == NULL)
2066 handle = ACPI_ROOT_OBJECT;
2067
2068 /*
2069 * Assume that what we've been pointed at is an Integer object, or
2070 * a method that will return an Integer.
2071 */
2072 buf.Pointer = &param;
2073 buf.Length = sizeof(param);
2074 status = AcpiEvaluateObject(handle, path, NULL, &buf);
2075 if (ACPI_SUCCESS(status)) {
2076 if (param.Type == ACPI_TYPE_INTEGER)
2077 *number = param.Integer.Value;
2078 else
2079 status = AE_TYPE;
2080 }
2081
2082 /*
2083 * In some applications, a method that's expected to return an Integer
2084 * may instead return a Buffer (probably to simplify some internal
2085 * arithmetic). We'll try to fetch whatever it is, and if it's a Buffer,
2086 * convert it into an Integer as best we can.
2087 *
2088 * This is a hack.
2089 */
2090 if (status == AE_BUFFER_OVERFLOW) {
2091 if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) {
2092 status = AE_NO_MEMORY;
2093 } else {
2094 status = AcpiEvaluateObject(handle, path, NULL, &buf);
2095 if (ACPI_SUCCESS(status))
2096 status = acpi_ConvertBufferToInteger(&buf, number);
2097 AcpiOsFree(buf.Pointer);
2098 }
2099 }
2100 return (status);
2101 }
2102
2103 ACPI_STATUS
2104 acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number)
2105 {
2106 ACPI_OBJECT *p;
2107 UINT8 *val;
2108 int i;
2109
2110 p = (ACPI_OBJECT *)bufp->Pointer;
2111 if (p->Type == ACPI_TYPE_INTEGER) {
2112 *number = p->Integer.Value;
2113 return (AE_OK);
2114 }
2115 if (p->Type != ACPI_TYPE_BUFFER)
2116 return (AE_TYPE);
2117 if (p->Buffer.Length > sizeof(int))
2118 return (AE_BAD_DATA);
2119
2120 *number = 0;
2121 val = p->Buffer.Pointer;
2122 for (i = 0; i < p->Buffer.Length; i++)
2123 *number += val[i] << (i * 8);
2124 return (AE_OK);
2125 }
2126
2127 /*
2128 * Iterate over the elements of a package object, calling the supplied
2129 * function for each element.
2130 *
2131 * XXX possible enhancement might be to abort traversal on error.
2132 */
2133 ACPI_STATUS
2134 acpi_ForeachPackageObject(ACPI_OBJECT *pkg,
2135 void (*func)(ACPI_OBJECT *comp, void *arg), void *arg)
2136 {
2137 ACPI_OBJECT *comp;
2138 int i;
2139
2140 if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE)
2141 return (AE_BAD_PARAMETER);
2142
2143 /* Iterate over components */
2144 i = 0;
2145 comp = pkg->Package.Elements;
2146 for (; i < pkg->Package.Count; i++, comp++)
2147 func(comp, arg);
2148
2149 return (AE_OK);
2150 }
2151
2152 /*
2153 * Find the (index)th resource object in a set.
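 * The buffer is expected to contain a resource list terminated by an
 * END_TAG entry; asking for an index at or beyond the terminator yields
 * AE_NOT_FOUND.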
2154 */ 2155 ACPI_STATUS 2156 acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp) 2157 { 2158 ACPI_RESOURCE *rp; 2159 int i; 2160 2161 rp = (ACPI_RESOURCE *)buf->Pointer; 2162 i = index; 2163 while (i-- > 0) { 2164 /* Range check */ 2165 if (rp > (ACPI_RESOURCE *)((uint8_t *)buf->Pointer + buf->Length)) 2166 return (AE_BAD_PARAMETER); 2167 2168 /* Check for terminator */ 2169 if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) 2170 return (AE_NOT_FOUND); 2171 rp = ACPI_NEXT_RESOURCE(rp); 2172 } 2173 if (resp != NULL) 2174 *resp = rp; 2175 2176 return (AE_OK); 2177 } 2178 2179 /* 2180 * Append an ACPI_RESOURCE to an ACPI_BUFFER. 2181 * 2182 * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER 2183 * provided to contain it. If the ACPI_BUFFER is empty, allocate a sensible 2184 * backing block. If the ACPI_RESOURCE is NULL, return an empty set of 2185 * resources. 2186 */ 2187 #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 2188 2189 ACPI_STATUS 2190 acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) 2191 { 2192 ACPI_RESOURCE *rp; 2193 void *newp; 2194 2195 /* Initialise the buffer if necessary. */ 2196 if (buf->Pointer == NULL) { 2197 buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; 2198 if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) 2199 return (AE_NO_MEMORY); 2200 rp = (ACPI_RESOURCE *)buf->Pointer; 2201 rp->Type = ACPI_RESOURCE_TYPE_END_TAG; 2202 rp->Length = ACPI_RS_SIZE_MIN; 2203 } 2204 if (res == NULL) 2205 return (AE_OK); 2206 2207 /* 2208 * Scan the current buffer looking for the terminator. 2209 * This will either find the terminator or hit the end 2210 * of the buffer and return an error. 2211 */ 2212 rp = (ACPI_RESOURCE *)buf->Pointer; 2213 for (;;) { 2214 /* Range check, don't go outside the buffer */ 2215 if (rp >= (ACPI_RESOURCE *)((uint8_t *)buf->Pointer + buf->Length)) 2216 return (AE_BAD_PARAMETER); 2217 if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) 2218 break; 2219 rp = ACPI_NEXT_RESOURCE(rp); 2220 } 2221 2222 /* 2223 * Check the size of the buffer and expand if required. 2224 * 2225 * Required size is: 2226 * size of existing resources before terminator + 2227 * size of new resource and header + 2228 * size of terminator. 2229 * 2230 * Note that this loop should really only run once, unless 2231 * for some reason we are stuffing a *really* huge resource. 2232 */ 2233 while ((((uint8_t *)rp - (uint8_t *)buf->Pointer) + 2234 res->Length + ACPI_RS_SIZE_NO_DATA + 2235 ACPI_RS_SIZE_MIN) >= buf->Length) { 2236 if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) 2237 return (AE_NO_MEMORY); 2238 bcopy(buf->Pointer, newp, buf->Length); 2239 rp = (ACPI_RESOURCE *)((uint8_t *)newp + 2240 ((uint8_t *)rp - (uint8_t *)buf->Pointer)); 2241 AcpiOsFree(buf->Pointer); 2242 buf->Pointer = newp; 2243 buf->Length += buf->Length; 2244 } 2245 2246 /* Insert the new resource. */ 2247 bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA); 2248 2249 /* And add the terminator. */ 2250 rp = ACPI_NEXT_RESOURCE(rp); 2251 rp->Type = ACPI_RESOURCE_TYPE_END_TAG; 2252 rp->Length = ACPI_RS_SIZE_MIN; 2253 2254 return (AE_OK); 2255 } 2256 2257 /* 2258 * Set interrupt model. 2259 */ 2260 ACPI_STATUS 2261 acpi_SetIntrModel(int model) 2262 { 2263 2264 return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model)); 2265 } 2266 2267 /* 2268 * DEPRECATED. This interface has serious deficiencies and will be 2269 * removed. 2270 * 2271 * Immediately enter the sleep state. 
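 * New callers should use acpi_ReqSleepState() below, which notifies
 * userland and collects acknowledgements before actually sleeping.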
In the old model, acpiconf(8) ran 2272 * rc.suspend and rc.resume so we don't have to notify devd(8) to do this. 2273 */ 2274 ACPI_STATUS 2275 acpi_SetSleepState(struct acpi_softc *sc, int state) 2276 { 2277 static int once; 2278 2279 if (!once) { 2280 device_printf(sc->acpi_dev, 2281 "warning: acpi_SetSleepState() deprecated, need to update your software\n"); 2282 once = 1; 2283 } 2284 return (acpi_EnterSleepState(sc, state)); 2285 } 2286 2287 static void 2288 acpi_sleep_force(void *arg) 2289 { 2290 struct acpi_softc *sc; 2291 2292 sc = arg; 2293 device_printf(sc->acpi_dev, 2294 "suspend request timed out, forcing sleep now\n"); 2295 if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) 2296 device_printf(sc->acpi_dev, "force sleep state S%d failed\n", 2297 sc->acpi_next_sstate); 2298 } 2299 2300 /* 2301 * Request that the system enter the given suspend state. All /dev/apm 2302 * devices and devd(8) will be notified. Userland then has a chance to 2303 * save state and acknowledge the request. The system sleeps once all 2304 * acks are in. 2305 */ 2306 int 2307 acpi_ReqSleepState(struct acpi_softc *sc, int state) 2308 { 2309 #ifdef notyet 2310 struct apm_clone_data *clone; 2311 #endif 2312 2313 if (state < ACPI_STATE_S1 || state > ACPI_STATE_S5) 2314 return (EINVAL); 2315 2316 /* S5 (soft-off) should be entered directly with no waiting. */ 2317 if (state == ACPI_STATE_S5) { 2318 if (ACPI_SUCCESS(acpi_EnterSleepState(sc, state))) 2319 return (0); 2320 else 2321 return (ENXIO); 2322 } 2323 2324 /* This platform does not support acpi suspend/resume. */ 2325 return (EOPNOTSUPP); 2326 2327 /* If a suspend request is already in progress, just return. */ 2328 ACPI_LOCK(acpi); 2329 if (sc->acpi_next_sstate != 0) { 2330 ACPI_UNLOCK(acpi); 2331 return (0); 2332 } 2333 2334 /* Record the pending state and notify all apm devices. */ 2335 sc->acpi_next_sstate = state; 2336 #if 0 2337 STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { 2338 clone->notify_status = APM_EV_NONE; 2339 if ((clone->flags & ACPI_EVF_DEVD) == 0) { 2340 KNOTE(&clone->sel_read.si_note, 0); 2341 } 2342 } 2343 #endif 2344 2345 /* If devd(8) is not running, immediately enter the sleep state. */ 2346 if (devctl_process_running() == FALSE) { 2347 ACPI_UNLOCK(acpi); 2348 if (ACPI_SUCCESS(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) { 2349 return (0); 2350 } else { 2351 return (ENXIO); 2352 } 2353 } 2354 2355 /* Now notify devd(8) also. */ 2356 acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state); 2357 2358 /* 2359 * Set a timeout to fire if userland doesn't ack the suspend request 2360 * in time. This way we still eventually go to sleep if we were 2361 * overheating or running low on battery, even if userland is hung. 2362 * We cancel this timeout once all userland acks are in or the 2363 * suspend request is aborted. 2364 */ 2365 callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc); 2366 ACPI_UNLOCK(acpi); 2367 return (0); 2368 } 2369 2370 /* 2371 * Acknowledge (or reject) a pending sleep state. The caller has 2372 * prepared for suspend and is now ready for it to proceed. If the 2373 * error argument is non-zero, it indicates suspend should be cancelled 2374 * and gives an errno value describing why. Once all votes are in, 2375 * we suspend the system. 2376 */ 2377 int 2378 acpi_AckSleepState(struct apm_clone_data *clone, int error) 2379 { 2380 struct acpi_softc *sc; 2381 int ret, sleeping; 2382 2383 /* This platform does not support acpi suspend/resume. 
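 * Acknowledgements are therefore rejected up front and the code below is
 * currently unreachable.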
*/ 2384 return (EOPNOTSUPP); 2385 2386 /* If no pending sleep state, return an error. */ 2387 ACPI_LOCK(acpi); 2388 sc = clone->acpi_sc; 2389 if (sc->acpi_next_sstate == 0) { 2390 ACPI_UNLOCK(acpi); 2391 return (ENXIO); 2392 } 2393 2394 /* Caller wants to abort suspend process. */ 2395 if (error) { 2396 sc->acpi_next_sstate = 0; 2397 callout_stop(&sc->susp_force_to); 2398 device_printf(sc->acpi_dev, 2399 "listener on %s cancelled the pending suspend\n", 2400 devtoname(clone->cdev)); 2401 ACPI_UNLOCK(acpi); 2402 return (0); 2403 } 2404 2405 /* 2406 * Mark this device as acking the suspend request. Then, walk through 2407 * all devices, seeing if they agree yet. We only count devices that 2408 * are writable since read-only devices couldn't ack the request. 2409 */ 2410 clone->notify_status = APM_EV_ACKED; 2411 sleeping = TRUE; 2412 STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { 2413 if ((clone->flags & ACPI_EVF_WRITE) != 0 && 2414 clone->notify_status != APM_EV_ACKED) { 2415 sleeping = FALSE; 2416 break; 2417 } 2418 } 2419 2420 /* If all devices have voted "yes", we will suspend now. */ 2421 if (sleeping) 2422 callout_stop(&sc->susp_force_to); 2423 ACPI_UNLOCK(acpi); 2424 ret = 0; 2425 if (sleeping) { 2426 if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) 2427 ret = ENODEV; 2428 } 2429 2430 return (ret); 2431 } 2432 2433 static void 2434 acpi_sleep_enable(void *arg) 2435 { 2436 ((struct acpi_softc *)arg)->acpi_sleep_disabled = 0; 2437 } 2438 2439 enum acpi_sleep_state { 2440 ACPI_SS_NONE, 2441 ACPI_SS_GPE_SET, 2442 ACPI_SS_DEV_SUSPEND, 2443 ACPI_SS_SLP_PREP, 2444 ACPI_SS_SLEPT, 2445 }; 2446 2447 /* 2448 * Enter the desired system sleep state. 2449 * 2450 * Currently we support S1-S5 but S4 is only S4BIOS 2451 */ 2452 static ACPI_STATUS 2453 acpi_EnterSleepState(struct acpi_softc *sc, int state) 2454 { 2455 ACPI_STATUS status; 2456 UINT8 TypeA; 2457 UINT8 TypeB; 2458 enum acpi_sleep_state slp_state; 2459 2460 ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); 2461 2462 /* Re-entry once we're suspending is not allowed. */ 2463 status = AE_OK; 2464 ACPI_LOCK(acpi); 2465 if (sc->acpi_sleep_disabled) { 2466 ACPI_UNLOCK(acpi); 2467 device_printf(sc->acpi_dev, 2468 "suspend request ignored (not ready yet)\n"); 2469 return (AE_ERROR); 2470 } 2471 sc->acpi_sleep_disabled = 1; 2472 ACPI_UNLOCK(acpi); 2473 2474 /* 2475 * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE 2476 * drivers need this. 2477 */ 2478 //get_mplock(); 2479 slp_state = ACPI_SS_NONE; 2480 switch (state) { 2481 case ACPI_STATE_S1: 2482 case ACPI_STATE_S2: 2483 case ACPI_STATE_S3: 2484 case ACPI_STATE_S4: 2485 status = AcpiGetSleepTypeData(state, &TypeA, &TypeB); 2486 if (status == AE_NOT_FOUND) { 2487 device_printf(sc->acpi_dev, 2488 "Sleep state S%d not supported by BIOS\n", state); 2489 break; 2490 } else if (ACPI_FAILURE(status)) { 2491 device_printf(sc->acpi_dev, "AcpiGetSleepTypeData failed - %s\n", 2492 AcpiFormatException(status)); 2493 break; 2494 } 2495 2496 sc->acpi_sstate = state; 2497 2498 /* Enable any GPEs as appropriate and requested by the user. */ 2499 acpi_wake_prep_walk(state); 2500 slp_state = ACPI_SS_GPE_SET; 2501 2502 /* 2503 * Inform all devices that we are going to sleep. If at least one 2504 * device fails, DEVICE_SUSPEND() automatically resumes the tree. 2505 * 2506 * XXX Note that a better two-pass approach with a 'veto' pass 2507 * followed by a "real thing" pass would be better, but the current 2508 * bus interface does not provide for this. 
2509 */ 2510 if (DEVICE_SUSPEND(root_bus) != 0) { 2511 device_printf(sc->acpi_dev, "device_suspend failed\n"); 2512 break; 2513 } 2514 slp_state = ACPI_SS_DEV_SUSPEND; 2515 2516 /* If testing device suspend only, back out of everything here. */ 2517 if (acpi_susp_bounce) 2518 break; 2519 2520 status = AcpiEnterSleepStatePrep(state); 2521 if (ACPI_FAILURE(status)) { 2522 device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", 2523 AcpiFormatException(status)); 2524 break; 2525 } 2526 slp_state = ACPI_SS_SLP_PREP; 2527 2528 if (sc->acpi_sleep_delay > 0) 2529 DELAY(sc->acpi_sleep_delay * 1000000); 2530 2531 if (state != ACPI_STATE_S1) { 2532 acpi_sleep_machdep(sc, state); 2533 2534 /* Re-enable ACPI hardware on wakeup from sleep state 4. */ 2535 if (state == ACPI_STATE_S4) 2536 AcpiEnable(); 2537 } else { 2538 ACPI_DISABLE_IRQS(); 2539 status = AcpiEnterSleepState(state); 2540 if (ACPI_FAILURE(status)) { 2541 device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", 2542 AcpiFormatException(status)); 2543 break; 2544 } 2545 } 2546 slp_state = ACPI_SS_SLEPT; 2547 break; 2548 case ACPI_STATE_S5: 2549 /* 2550 * Shut down cleanly and power off. This will call us back through the 2551 * shutdown handlers. 2552 */ 2553 shutdown_nice(RB_POWEROFF); 2554 break; 2555 case ACPI_STATE_S0: 2556 default: 2557 status = AE_BAD_PARAMETER; 2558 break; 2559 } 2560 2561 /* 2562 * Back out state according to how far along we got in the suspend 2563 * process. This handles both the error and success cases. 2564 */ 2565 sc->acpi_next_sstate = 0; 2566 if (slp_state >= ACPI_SS_GPE_SET) { 2567 acpi_wake_prep_walk(state); 2568 sc->acpi_sstate = ACPI_STATE_S0; 2569 } 2570 if (slp_state >= ACPI_SS_SLP_PREP) 2571 AcpiLeaveSleepState(state); 2572 if (slp_state >= ACPI_SS_DEV_SUSPEND) 2573 DEVICE_RESUME(root_bus); 2574 if (slp_state >= ACPI_SS_SLEPT) 2575 acpi_enable_fixed_events(sc); 2576 2577 /* Allow another sleep request after a while. */ 2578 /* XXX: needs timeout */ 2579 if (state != ACPI_STATE_S5) 2580 acpi_sleep_enable(sc); 2581 2582 /* Run /etc/rc.resume after we are back. */ 2583 acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state); 2584 2585 //rel_mplock(); 2586 return_ACPI_STATUS (status); 2587 } 2588 2589 /* Enable or disable the device's GPE. */ 2590 int 2591 acpi_wake_set_enable(device_t dev, int enable) 2592 { 2593 struct acpi_prw_data prw; 2594 ACPI_STATUS status; 2595 int flags; 2596 2597 /* Make sure the device supports waking the system and get the GPE. */ 2598 if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0) 2599 return (ENXIO); 2600 2601 flags = acpi_get_flags(dev); 2602 if (enable) { 2603 status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, 2604 ACPI_GPE_ENABLE); 2605 if (ACPI_FAILURE(status)) { 2606 device_printf(dev, "enable wake failed\n"); 2607 return (ENXIO); 2608 } 2609 acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED); 2610 } else { 2611 status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, 2612 ACPI_GPE_DISABLE); 2613 if (ACPI_FAILURE(status)) { 2614 device_printf(dev, "disable wake failed\n"); 2615 return (ENXIO); 2616 } 2617 acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED); 2618 } 2619 2620 return (0); 2621 } 2622 2623 static int 2624 acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate) 2625 { 2626 struct acpi_prw_data prw; 2627 device_t dev; 2628 2629 /* Check that this is a wake-capable device and get its GPE. 
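 * Devices without a _PRW object fail acpi_parse_prw() and are simply
 * skipped.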
*/ 2630 if (acpi_parse_prw(handle, &prw) != 0) 2631 return (ENXIO); 2632 dev = acpi_get_device(handle); 2633 2634 /* 2635 * The destination sleep state must be less than (i.e., higher power) 2636 * or equal to the value specified by _PRW. If this GPE cannot be 2637 * enabled for the next sleep state, then disable it. If it can and 2638 * the user requested it be enabled, turn on any required power resources 2639 * and set _PSW. 2640 */ 2641 if (sstate > prw.lowest_wake) { 2642 AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); 2643 if (bootverbose) 2644 device_printf(dev, "wake_prep disabled wake for %s (S%d)\n", 2645 acpi_name(handle), sstate); 2646 } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) { 2647 acpi_pwr_wake_enable(handle, 1); 2648 acpi_SetInteger(handle, "_PSW", 1); 2649 if (bootverbose) 2650 device_printf(dev, "wake_prep enabled for %s (S%d)\n", 2651 acpi_name(handle), sstate); 2652 } 2653 2654 return (0); 2655 } 2656 2657 static int 2658 acpi_wake_run_prep(ACPI_HANDLE handle, int sstate) 2659 { 2660 struct acpi_prw_data prw; 2661 device_t dev; 2662 2663 /* 2664 * Check that this is a wake-capable device and get its GPE. Return 2665 * now if the user didn't enable this device for wake. 2666 */ 2667 if (acpi_parse_prw(handle, &prw) != 0) 2668 return (ENXIO); 2669 dev = acpi_get_device(handle); 2670 if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0) 2671 return (0); 2672 2673 /* 2674 * If this GPE couldn't be enabled for the previous sleep state, it was 2675 * disabled before going to sleep so re-enable it. If it was enabled, 2676 * clear _PSW and turn off any power resources it used. 2677 */ 2678 if (sstate > prw.lowest_wake) { 2679 AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); 2680 if (bootverbose) 2681 device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle)); 2682 } else { 2683 acpi_SetInteger(handle, "_PSW", 0); 2684 acpi_pwr_wake_enable(handle, 0); 2685 if (bootverbose) 2686 device_printf(dev, "run_prep cleaned up for %s\n", 2687 acpi_name(handle)); 2688 } 2689 2690 return (0); 2691 } 2692 2693 static ACPI_STATUS 2694 acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status) 2695 { 2696 int sstate; 2697 2698 /* If suspending, run the sleep prep function, otherwise wake. */ 2699 sstate = *(int *)context; 2700 if (AcpiGbl_SystemAwakeAndRunning) 2701 acpi_wake_sleep_prep(handle, sstate); 2702 else 2703 acpi_wake_run_prep(handle, sstate); 2704 return (AE_OK); 2705 } 2706 2707 /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */ 2708 static int 2709 acpi_wake_prep_walk(int sstate) 2710 { 2711 ACPI_HANDLE sb_handle; 2712 2713 if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) { 2714 AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100, 2715 acpi_wake_prep, NULL, &sstate, NULL); 2716 } 2717 return (0); 2718 } 2719 2720 /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. 
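 * (The body is still under #ifdef notyet, so the per-device "wake"
 * sysctls are not installed on this platform yet.)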
*/ 2721 static int 2722 acpi_wake_sysctl_walk(device_t dev) 2723 { 2724 #ifdef notyet 2725 int error, i, numdevs; 2726 device_t *devlist; 2727 device_t child; 2728 ACPI_STATUS status; 2729 2730 error = device_get_children(dev, &devlist, &numdevs); 2731 if (error != 0 || numdevs == 0) { 2732 if (numdevs == 0) 2733 kfree(devlist, M_TEMP); 2734 return (error); 2735 } 2736 for (i = 0; i < numdevs; i++) { 2737 child = devlist[i]; 2738 acpi_wake_sysctl_walk(child); 2739 if (!device_is_attached(child)) 2740 continue; 2741 status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL); 2742 if (ACPI_SUCCESS(status)) { 2743 SYSCTL_ADD_PROC(device_get_sysctl_ctx(child), 2744 SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO, 2745 "wake", CTLTYPE_INT | CTLFLAG_RW, child, 0, 2746 acpi_wake_set_sysctl, "I", "Device set to wake the system"); 2747 } 2748 } 2749 kfree(devlist, M_TEMP); 2750 #endif 2751 2752 return (0); 2753 } 2754 2755 #ifdef notyet 2756 /* Enable or disable wake from userland. */ 2757 static int 2758 acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS) 2759 { 2760 int enable, error; 2761 device_t dev; 2762 2763 dev = (device_t)arg1; 2764 enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0; 2765 2766 error = sysctl_handle_int(oidp, &enable, 0, req); 2767 if (error != 0 || req->newptr == NULL) 2768 return (error); 2769 if (enable != 0 && enable != 1) 2770 return (EINVAL); 2771 2772 return (acpi_wake_set_enable(dev, enable)); 2773 } 2774 #endif 2775 2776 /* Parse a device's _PRW into a structure. */ 2777 int 2778 acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw) 2779 { 2780 ACPI_STATUS status; 2781 ACPI_BUFFER prw_buffer; 2782 ACPI_OBJECT *res, *res2; 2783 int error, i, power_count; 2784 2785 if (h == NULL || prw == NULL) 2786 return (EINVAL); 2787 2788 /* 2789 * The _PRW object (7.2.9) is only required for devices that have the 2790 * ability to wake the system from a sleeping state. 2791 */ 2792 error = EINVAL; 2793 prw_buffer.Pointer = NULL; 2794 prw_buffer.Length = ACPI_ALLOCATE_BUFFER; 2795 status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer); 2796 if (ACPI_FAILURE(status)) 2797 return (ENOENT); 2798 res = (ACPI_OBJECT *)prw_buffer.Pointer; 2799 if (res == NULL) 2800 return (ENOENT); 2801 if (!ACPI_PKG_VALID(res, 2)) 2802 goto out; 2803 2804 /* 2805 * Element 1 of the _PRW object: 2806 * The lowest power system sleeping state that can be entered while still 2807 * providing wake functionality. The sleeping state being entered must 2808 * be less than (i.e., higher power) or equal to this value. 2809 */ 2810 if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0) 2811 goto out; 2812 2813 /* 2814 * Element 0 of the _PRW object: 2815 */ 2816 switch (res->Package.Elements[0].Type) { 2817 case ACPI_TYPE_INTEGER: 2818 /* 2819 * If the data type of this package element is numeric, then this 2820 * _PRW package element is the bit index in the GPEx_EN, in the 2821 * GPE blocks described in the FADT, of the enable bit that is 2822 * enabled for the wake event. 2823 */ 2824 prw->gpe_handle = NULL; 2825 prw->gpe_bit = res->Package.Elements[0].Integer.Value; 2826 error = 0; 2827 break; 2828 case ACPI_TYPE_PACKAGE: 2829 /* 2830 * If the data type of this package element is a package, then this 2831 * _PRW package element is itself a package containing two 2832 * elements. The first is an object reference to the GPE Block 2833 * device that contains the GPE that will be triggered by the wake 2834 * event. 
The second element is numeric and it contains the bit 2835 * index in the GPEx_EN, in the GPE Block referenced by the 2836 * first element in the package, of the enable bit that is enabled for 2837 * the wake event. 2838 * 2839 * For example, if this field is a package then it is of the form: 2840 * Package() {\_SB.PCI0.ISA.GPE, 2} 2841 */ 2842 res2 = &res->Package.Elements[0]; 2843 if (!ACPI_PKG_VALID(res2, 2)) 2844 goto out; 2845 prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]); 2846 if (prw->gpe_handle == NULL) 2847 goto out; 2848 if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0) 2849 goto out; 2850 error = 0; 2851 break; 2852 default: 2853 goto out; 2854 } 2855 2856 /* Elements 2 to N of the _PRW object are power resources. */ 2857 power_count = res->Package.Count - 2; 2858 if (power_count > ACPI_PRW_MAX_POWERRES) { 2859 kprintf("ACPI device %s has too many power resources\n", acpi_name(h)); 2860 power_count = 0; 2861 } 2862 prw->power_res_count = power_count; 2863 for (i = 0; i < power_count; i++) 2864 prw->power_res[i] = res->Package.Elements[i]; 2865 2866 out: 2867 if (prw_buffer.Pointer != NULL) 2868 AcpiOsFree(prw_buffer.Pointer); 2869 return (error); 2870 } 2871 2872 /* 2873 * ACPI Event Handlers 2874 */ 2875 2876 /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */ 2877 2878 static void 2879 acpi_system_eventhandler_sleep(void *arg, int state) 2880 { 2881 struct acpi_softc *sc; 2882 int ret; 2883 2884 ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); 2885 2886 sc = arg; 2887 2888 /* Check if button action is disabled. */ 2889 if (state == ACPI_S_STATES_MAX + 1) 2890 return; 2891 2892 /* Request that the system prepare to enter the given suspend state. */ 2893 ret = acpi_ReqSleepState((struct acpi_softc *)arg, state); 2894 if (ret != 0) 2895 device_printf(sc->acpi_dev, 2896 "request to enter state S%d failed (err %d)\n", state, ret); 2897 2898 return_VOID; 2899 } 2900 2901 static void 2902 acpi_system_eventhandler_wakeup(void *arg, int state) 2903 { 2904 2905 ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); 2906 2907 /* Currently, nothing to do for wakeup. 
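 * Device resume and the "Resume" notification to userland are handled
 * directly in acpi_EnterSleepState().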
*/ 2908 2909 return_VOID; 2910 } 2911 2912 /* 2913 * ACPICA Event Handlers (FixedEvent, also called from button notify handler) 2914 */ 2915 UINT32 2916 acpi_event_power_button_sleep(void *context) 2917 { 2918 struct acpi_softc *sc = (struct acpi_softc *)context; 2919 2920 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 2921 2922 EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_power_button_sx); 2923 2924 return_VALUE (ACPI_INTERRUPT_HANDLED); 2925 } 2926 2927 UINT32 2928 acpi_event_power_button_wake(void *context) 2929 { 2930 struct acpi_softc *sc = (struct acpi_softc *)context; 2931 2932 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 2933 2934 EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_power_button_sx); 2935 2936 return_VALUE (ACPI_INTERRUPT_HANDLED); 2937 } 2938 2939 UINT32 2940 acpi_event_sleep_button_sleep(void *context) 2941 { 2942 struct acpi_softc *sc = (struct acpi_softc *)context; 2943 2944 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 2945 2946 EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_sleep_button_sx); 2947 2948 return_VALUE (ACPI_INTERRUPT_HANDLED); 2949 } 2950 2951 UINT32 2952 acpi_event_sleep_button_wake(void *context) 2953 { 2954 struct acpi_softc *sc = (struct acpi_softc *)context; 2955 2956 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 2957 2958 EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_sleep_button_sx); 2959 2960 return_VALUE (ACPI_INTERRUPT_HANDLED); 2961 } 2962 2963 /* 2964 * XXX This static buffer is suboptimal. There is no locking so only 2965 * use this for single-threaded callers. 2966 */ 2967 char * 2968 acpi_name(ACPI_HANDLE handle) 2969 { 2970 ACPI_BUFFER buf; 2971 static char data[256]; 2972 2973 buf.Length = sizeof(data); 2974 buf.Pointer = data; 2975 2976 if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf))) 2977 return (data); 2978 return ("(unknown)"); 2979 } 2980 2981 /* 2982 * Debugging/bug-avoidance. Avoid trying to fetch info on various 2983 * parts of the namespace. 2984 */ 2985 int 2986 acpi_avoid(ACPI_HANDLE handle) 2987 { 2988 char *cp, *env, *np; 2989 int len; 2990 2991 np = acpi_name(handle); 2992 if (*np == '\\') 2993 np++; 2994 if ((env = kgetenv("debug.acpi.avoid")) == NULL) 2995 return (0); 2996 2997 /* Scan the avoid list checking for a match */ 2998 cp = env; 2999 for (;;) { 3000 while (*cp != 0 && isspace(*cp)) 3001 cp++; 3002 if (*cp == 0) 3003 break; 3004 len = 0; 3005 while (cp[len] != 0 && !isspace(cp[len])) 3006 len++; 3007 if (!strncmp(cp, np, len)) { 3008 kfreeenv(env); 3009 return(1); 3010 } 3011 cp += len; 3012 } 3013 kfreeenv(env); 3014 3015 return (0); 3016 } 3017 3018 /* 3019 * Debugging/bug-avoidance. Disable ACPI subsystem components. 3020 */ 3021 int 3022 acpi_disabled(char *subsys) 3023 { 3024 char *cp, *env; 3025 int len; 3026 3027 if ((env = kgetenv("debug.acpi.disabled")) == NULL) 3028 return (0); 3029 if (strcmp(env, "all") == 0) { 3030 kfreeenv(env); 3031 return (1); 3032 } 3033 3034 /* Scan the disable list, checking for a match. */ 3035 cp = env; 3036 for (;;) { 3037 while (*cp != '\0' && isspace(*cp)) 3038 cp++; 3039 if (*cp == '\0') 3040 break; 3041 len = 0; 3042 while (cp[len] != '\0' && !isspace(cp[len])) 3043 len++; 3044 if (strncmp(cp, subsys, len) == 0) { 3045 kfreeenv(env); 3046 return (1); 3047 } 3048 cp += len; 3049 } 3050 kfreeenv(env); 3051 3052 return (0); 3053 } 3054 3055 /* 3056 * Debugging/bug-avoidance. Enable ACPI subsystem components. Most 3057 * components are enabled by default. The ones that are not have to be 3058 * enabled via debug.acpi.enabled. 
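 * Both debug.acpi.disabled and debug.acpi.enabled take a whitespace-separated
 * list of subsystem names, or the special value "all"; for example,
 * debug.acpi.disabled="children" makes acpi_probe_child() skip creating
 * ACPI child devices.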
3059 */ 3060 int 3061 acpi_enabled(char *subsys) 3062 { 3063 char *cp, *env; 3064 int len; 3065 3066 if ((env = kgetenv("debug.acpi.enabled")) == NULL) 3067 return (0); 3068 if (strcmp(env, "all") == 0) { 3069 kfreeenv(env); 3070 return (1); 3071 } 3072 3073 /* Scan the enable list, checking for a match. */ 3074 cp = env; 3075 for (;;) { 3076 while (*cp != '\0' && isspace(*cp)) 3077 cp++; 3078 if (*cp == '\0') 3079 break; 3080 len = 0; 3081 while (cp[len] != '\0' && !isspace(cp[len])) 3082 len++; 3083 if (strncmp(cp, subsys, len) == 0) { 3084 kfreeenv(env); 3085 return (1); 3086 } 3087 cp += len; 3088 } 3089 kfreeenv(env); 3090 3091 return (0); 3092 } 3093 3094 /* 3095 * Control interface. 3096 * 3097 * We multiplex ioctls for all participating ACPI devices here. Individual 3098 * drivers wanting to be accessible via /dev/acpi should use the 3099 * register/deregister interface to make their handlers visible. 3100 */ 3101 struct acpi_ioctl_hook 3102 { 3103 TAILQ_ENTRY(acpi_ioctl_hook) link; 3104 u_long cmd; 3105 acpi_ioctl_fn fn; 3106 void *arg; 3107 }; 3108 3109 static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks; 3110 static int acpi_ioctl_hooks_initted; 3111 3112 int 3113 acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg) 3114 { 3115 struct acpi_ioctl_hook *hp; 3116 3117 if ((hp = kmalloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL) 3118 return (ENOMEM); 3119 hp->cmd = cmd; 3120 hp->fn = fn; 3121 hp->arg = arg; 3122 3123 ACPI_LOCK(acpi); 3124 if (acpi_ioctl_hooks_initted == 0) { 3125 TAILQ_INIT(&acpi_ioctl_hooks); 3126 acpi_ioctl_hooks_initted = 1; 3127 } 3128 TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link); 3129 ACPI_UNLOCK(acpi); 3130 3131 return (0); 3132 } 3133 3134 void 3135 acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn) 3136 { 3137 struct acpi_ioctl_hook *hp; 3138 3139 ACPI_LOCK(acpi); 3140 TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) 3141 if (hp->cmd == cmd && hp->fn == fn) 3142 break; 3143 3144 if (hp != NULL) { 3145 TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link); 3146 kfree(hp, M_ACPIDEV); 3147 } 3148 ACPI_UNLOCK(acpi); 3149 } 3150 3151 static int 3152 acpiopen(struct dev_open_args *ap) 3153 { 3154 return (0); 3155 } 3156 3157 static int 3158 acpiclose(struct dev_close_args *ap) 3159 { 3160 return (0); 3161 } 3162 3163 static int 3164 acpiioctl(struct dev_ioctl_args *ap) 3165 { 3166 struct acpi_softc *sc; 3167 struct acpi_ioctl_hook *hp; 3168 int error, state; 3169 3170 error = 0; 3171 hp = NULL; 3172 sc = ap->a_head.a_dev->si_drv1; 3173 3174 /* 3175 * Scan the list of registered ioctls, looking for handlers. 3176 */ 3177 ACPI_LOCK(acpi); 3178 if (acpi_ioctl_hooks_initted) 3179 TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) { 3180 if (hp->cmd == ap->a_cmd) 3181 break; 3182 } 3183 ACPI_UNLOCK(acpi); 3184 if (hp) 3185 return (hp->fn(ap->a_cmd, ap->a_data, hp->arg)); 3186 3187 /* 3188 * Core ioctls are not permitted for non-writable user. 3189 * Currently, other ioctls just fetch information. 3190 * Not changing system behavior. 3191 */ 3192 if ((ap->a_fflag & FWRITE) == 0) 3193 return (EPERM); 3194 3195 /* Core system ioctls. 
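 * (sleep-state requests and, when debug.acpi.allow_method_calls is set,
 * direct control-method evaluation)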
*/
3196 switch (ap->a_cmd) {
3197 case ACPIIO_REQSLPSTATE:
3198 state = *(int *)ap->a_data;
3199 if (state != ACPI_STATE_S5)
3200 error = acpi_ReqSleepState(sc, state);
3201 else {
3202 device_printf(sc->acpi_dev,
3203 "power off via acpi ioctl not supported\n");
3204 error = ENXIO;
3205 }
3206 break;
3207 case ACPIIO_ACKSLPSTATE:
3208 error = EOPNOTSUPP;
3209 #if 0 /* notyet */
3210 error = *(int *)ap->a_data;
3211 error = acpi_AckSleepState(sc->acpi_clone, error);
3212 #endif
3213 break;
3214 case ACPIIO_SETSLPSTATE: /* DEPRECATED */
3215 error = EINVAL;
3216 state = *(int *)ap->a_data;
3217 if (state >= ACPI_STATE_S0 && state <= ACPI_S_STATES_MAX)
3218 if (ACPI_SUCCESS(acpi_SetSleepState(sc, state)))
3219 error = 0;
3220 break;
3221 case ACPIIO_DO_MCALL:
3222 if (acpi_allow_mcall == 1) {
3223 struct acpi_mcall_ioctl_arg *params;
3224 ACPI_BUFFER result = { ACPI_ALLOCATE_BUFFER, NULL };
3225 ACPI_OBJECT *resobj;
3226
3227 error = EINVAL;
3228 params = (struct acpi_mcall_ioctl_arg *)ap->a_data;
3229 params->retval = AcpiEvaluateObject(NULL, params->path,
3230 &params->args, &result);
3231 if (ACPI_SUCCESS(params->retval) && result.Pointer != NULL &&
3232 params->result.Pointer != NULL) {
3233 params->result.Length = min(params->result.Length,
3234 result.Length);
3235 copyout(result.Pointer, params->result.Pointer,
3236 params->result.Length);
3237 params->reslen = result.Length;
3238 if (result.Length >= sizeof(ACPI_OBJECT)) {
3239 resobj = (ACPI_OBJECT *)params->result.Pointer;
3240 switch (resobj->Type) {
3241 case ACPI_TYPE_STRING:
3242 resobj->String.Pointer = (char *)
3243 ((UINT8 *)(resobj->String.Pointer) -
3244 (UINT8 *)result.Pointer +
3245 (UINT8 *)resobj);
3246 break;
3247 case ACPI_TYPE_BUFFER:
3248 resobj->Buffer.Pointer -= (UINT8 *)result.Pointer -
3249 (UINT8 *)resobj;
3250 break;
3251 }
3252 }
3253 error = 0;
3254 }
3255 if (result.Pointer != NULL)
3256 AcpiOsFree(result.Pointer);
3257 } else {
3258 device_printf(sc->acpi_dev,
3259 "debug.acpi.allow_method_calls must be set\n");
3260 error = ENXIO;
3261 }
3262 break;
3263 default:
3264 error = ENXIO;
3265 break;
3266 }
3267 return (error);
3268 }
3269
3270 static int
3271 acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
3272 {
3273 int error;
3274 struct sbuf sb;
3275 UINT8 state, TypeA, TypeB;
3276
3277 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
3278 for (state = ACPI_STATE_S1; state < ACPI_S_STATES_MAX + 1; state++)
3279 if (ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
3280 sbuf_printf(&sb, "S%d ", state);
3281 sbuf_trim(&sb);
3282 sbuf_finish(&sb);
3283 error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
3284 sbuf_delete(&sb);
3285 return (error);
3286 }
3287
3288 static int
3289 acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
3290 {
3291 char sleep_state[10];
3292 int error;
3293 u_int new_state, old_state;
3294
3295 old_state = *(u_int *)oidp->oid_arg1;
3296 if (old_state > ACPI_S_STATES_MAX + 1)
3297 strlcpy(sleep_state, "unknown", sizeof(sleep_state));
3298 else
3299 strlcpy(sleep_state, sleep_state_names[old_state], sizeof(sleep_state));
3300 error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
3301 if (error == 0 && req->newptr != NULL) {
3302 new_state = ACPI_STATE_S0;
3303 for (; new_state <= ACPI_S_STATES_MAX + 1; new_state++)
3304 if (strcmp(sleep_state, sleep_state_names[new_state]) == 0)
3305 break;
3306 if (new_state <= ACPI_S_STATES_MAX + 1) {
3307 if (new_state != old_state)
3308 *(u_int *)oidp->oid_arg1 = new_state;
3309 } else
3310 error = EINVAL;
3311 } 3312 3313 return (error); 3314 } 3315 3316 /* Inform devctl(4) when we receive a Notify. */ 3317 void 3318 acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify) 3319 { 3320 char notify_buf[16]; 3321 ACPI_BUFFER handle_buf; 3322 ACPI_STATUS status; 3323 3324 if (subsystem == NULL) 3325 return; 3326 3327 handle_buf.Pointer = NULL; 3328 handle_buf.Length = ACPI_ALLOCATE_BUFFER; 3329 status = AcpiNsHandleToPathname(h, &handle_buf, FALSE); 3330 if (ACPI_FAILURE(status)) 3331 return; 3332 ksnprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify); 3333 devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf); 3334 AcpiOsFree(handle_buf.Pointer); 3335 } 3336 3337 #ifdef ACPI_DEBUG 3338 /* 3339 * Support for parsing debug options from the kernel environment. 3340 * 3341 * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers 3342 * by specifying the names of the bits in the debug.acpi.layer and 3343 * debug.acpi.level environment variables. Bits may be unset by 3344 * prefixing the bit name with !. 3345 */ 3346 struct debugtag 3347 { 3348 char *name; 3349 UINT32 value; 3350 }; 3351 3352 static struct debugtag dbg_layer[] = { 3353 {"ACPI_UTILITIES", ACPI_UTILITIES}, 3354 {"ACPI_HARDWARE", ACPI_HARDWARE}, 3355 {"ACPI_EVENTS", ACPI_EVENTS}, 3356 {"ACPI_TABLES", ACPI_TABLES}, 3357 {"ACPI_NAMESPACE", ACPI_NAMESPACE}, 3358 {"ACPI_PARSER", ACPI_PARSER}, 3359 {"ACPI_DISPATCHER", ACPI_DISPATCHER}, 3360 {"ACPI_EXECUTER", ACPI_EXECUTER}, 3361 {"ACPI_RESOURCES", ACPI_RESOURCES}, 3362 {"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER}, 3363 {"ACPI_OS_SERVICES", ACPI_OS_SERVICES}, 3364 {"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER}, 3365 {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS}, 3366 3367 {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER}, 3368 {"ACPI_BATTERY", ACPI_BATTERY}, 3369 {"ACPI_BUS", ACPI_BUS}, 3370 {"ACPI_BUTTON", ACPI_BUTTON}, 3371 {"ACPI_EC", ACPI_EC}, 3372 {"ACPI_FAN", ACPI_FAN}, 3373 {"ACPI_POWERRES", ACPI_POWERRES}, 3374 {"ACPI_PROCESSOR", ACPI_PROCESSOR}, 3375 {"ACPI_THERMAL", ACPI_THERMAL}, 3376 {"ACPI_TIMER", ACPI_TIMER}, 3377 {"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS}, 3378 {NULL, 0} 3379 }; 3380 3381 static struct debugtag dbg_level[] = { 3382 {"ACPI_LV_INIT", ACPI_LV_INIT}, 3383 {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT}, 3384 {"ACPI_LV_INFO", ACPI_LV_INFO}, 3385 {"ACPI_LV_REPAIR", ACPI_LV_REPAIR}, 3386 {"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS}, 3387 3388 /* Trace verbosity level 1 [Standard Trace Level] */ 3389 {"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES}, 3390 {"ACPI_LV_PARSE", ACPI_LV_PARSE}, 3391 {"ACPI_LV_LOAD", ACPI_LV_LOAD}, 3392 {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH}, 3393 {"ACPI_LV_EXEC", ACPI_LV_EXEC}, 3394 {"ACPI_LV_NAMES", ACPI_LV_NAMES}, 3395 {"ACPI_LV_OPREGION", ACPI_LV_OPREGION}, 3396 {"ACPI_LV_BFIELD", ACPI_LV_BFIELD}, 3397 {"ACPI_LV_TABLES", ACPI_LV_TABLES}, 3398 {"ACPI_LV_VALUES", ACPI_LV_VALUES}, 3399 {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS}, 3400 {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES}, 3401 {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS}, 3402 {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE}, 3403 {"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1}, 3404 3405 /* Trace verbosity level 2 [Function tracing and memory allocation] */ 3406 {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS}, 3407 {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS}, 3408 {"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS}, 3409 {"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2}, 3410 {"ACPI_LV_ALL", ACPI_LV_ALL}, 3411 3412 /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ 3413 
{"ACPI_LV_MUTEX", ACPI_LV_MUTEX}, 3414 {"ACPI_LV_THREADS", ACPI_LV_THREADS}, 3415 {"ACPI_LV_IO", ACPI_LV_IO}, 3416 {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS}, 3417 {"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3}, 3418 3419 /* Exceptionally verbose output -- also used in the global "DebugLevel" */ 3420 {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE}, 3421 {"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO}, 3422 {"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES}, 3423 {"ACPI_LV_EVENTS", ACPI_LV_EVENTS}, 3424 {"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE}, 3425 {NULL, 0} 3426 }; 3427 3428 static void 3429 acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag) 3430 { 3431 char *ep; 3432 int i, l; 3433 int set; 3434 3435 while (*cp) { 3436 if (isspace(*cp)) { 3437 cp++; 3438 continue; 3439 } 3440 ep = cp; 3441 while (*ep && !isspace(*ep)) 3442 ep++; 3443 if (*cp == '!') { 3444 set = 0; 3445 cp++; 3446 if (cp == ep) 3447 continue; 3448 } else { 3449 set = 1; 3450 } 3451 l = ep - cp; 3452 for (i = 0; tag[i].name != NULL; i++) { 3453 if (!strncmp(cp, tag[i].name, l)) { 3454 if (set) 3455 *flag |= tag[i].value; 3456 else 3457 *flag &= ~tag[i].value; 3458 } 3459 } 3460 cp = ep; 3461 } 3462 } 3463 3464 static void 3465 acpi_set_debugging(void *junk) 3466 { 3467 char *layer, *level; 3468 3469 if (cold) { 3470 AcpiDbgLayer = 0; 3471 AcpiDbgLevel = 0; 3472 } 3473 3474 layer = kgetenv("debug.acpi.layer"); 3475 level = kgetenv("debug.acpi.level"); 3476 if (layer == NULL && level == NULL) 3477 return; 3478 3479 kprintf("ACPI set debug"); 3480 if (layer != NULL) { 3481 if (strcmp("NONE", layer) != 0) 3482 kprintf(" layer '%s'", layer); 3483 acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer); 3484 kfreeenv(layer); 3485 } 3486 if (level != NULL) { 3487 if (strcmp("NONE", level) != 0) 3488 kprintf(" level '%s'", level); 3489 acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel); 3490 kfreeenv(level); 3491 } 3492 kprintf("\n"); 3493 } 3494 3495 SYSINIT(acpi_debugging, SI_BOOT1_TUNABLES, SI_ORDER_ANY, acpi_set_debugging, 3496 NULL); 3497 3498 static int 3499 acpi_debug_sysctl(SYSCTL_HANDLER_ARGS) 3500 { 3501 int error, *dbg; 3502 struct debugtag *tag; 3503 struct sbuf sb; 3504 3505 if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL) 3506 return (ENOMEM); 3507 if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) { 3508 tag = &dbg_layer[0]; 3509 dbg = &AcpiDbgLayer; 3510 } else { 3511 tag = &dbg_level[0]; 3512 dbg = &AcpiDbgLevel; 3513 } 3514 3515 /* Get old values if this is a get request. */ 3516 ACPI_SERIAL_BEGIN(acpi); 3517 if (*dbg == 0) { 3518 sbuf_cpy(&sb, "NONE"); 3519 } else if (req->newptr == NULL) { 3520 for (; tag->name != NULL; tag++) { 3521 if ((*dbg & tag->value) == tag->value) 3522 sbuf_printf(&sb, "%s ", tag->name); 3523 } 3524 } 3525 sbuf_trim(&sb); 3526 sbuf_finish(&sb); 3527 3528 /* Copy out the old values to the user. */ 3529 error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb)); 3530 sbuf_delete(&sb); 3531 3532 /* If the user is setting a string, parse it. 
*/ 3533 if (error == 0 && req->newptr != NULL) { 3534 *dbg = 0; 3535 ksetenv((char *)oidp->oid_arg1, (char *)req->newptr); 3536 acpi_set_debugging(NULL); 3537 } 3538 ACPI_SERIAL_END(acpi); 3539 3540 return (error); 3541 } 3542 3543 SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING, 3544 "debug.acpi.layer", 0, acpi_debug_sysctl, "A", ""); 3545 SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING, 3546 "debug.acpi.level", 0, acpi_debug_sysctl, "A", ""); 3547 #endif /* ACPI_DEBUG */ 3548 3549 static int 3550 acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS) 3551 { 3552 int error; 3553 int old; 3554 3555 old = acpi_debug_objects; 3556 error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req); 3557 if (error != 0 || req->newptr == NULL) 3558 return (error); 3559 if (old == acpi_debug_objects || (old && acpi_debug_objects)) 3560 return (0); 3561 3562 ACPI_SERIAL_BEGIN(acpi); 3563 AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE; 3564 ACPI_SERIAL_END(acpi); 3565 3566 return (0); 3567 } 3568 3569 3570 static int 3571 acpi_parse_interfaces(char *str, struct acpi_interface *iface) 3572 { 3573 char *p; 3574 size_t len; 3575 int i, j; 3576 3577 p = str; 3578 while (isspace(*p) || *p == ',') 3579 p++; 3580 len = strlen(p); 3581 if (len == 0) 3582 return (0); 3583 p = kstrdup(p, M_TEMP); 3584 for (i = 0; i < len; i++) 3585 if (p[i] == ',') 3586 p[i] = '\0'; 3587 i = j = 0; 3588 while (i < len) 3589 if (isspace(p[i]) || p[i] == '\0') 3590 i++; 3591 else { 3592 i += strlen(p + i) + 1; 3593 j++; 3594 } 3595 if (j == 0) { 3596 kfree(p, M_TEMP); 3597 return (0); 3598 } 3599 iface->data = kmalloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK); 3600 iface->num = j; 3601 i = j = 0; 3602 while (i < len) 3603 if (isspace(p[i]) || p[i] == '\0') 3604 i++; 3605 else { 3606 iface->data[j] = p + i; 3607 i += strlen(p + i) + 1; 3608 j++; 3609 } 3610 3611 return (j); 3612 } 3613 3614 static void 3615 acpi_free_interfaces(struct acpi_interface *iface) 3616 { 3617 kfree(iface->data[0], M_TEMP); 3618 kfree(iface->data, M_TEMP); 3619 } 3620 3621 static void 3622 acpi_reset_interfaces(device_t dev) 3623 { 3624 struct acpi_interface list; 3625 ACPI_STATUS status; 3626 int i; 3627 3628 if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) { 3629 for (i = 0; i < list.num; i++) { 3630 status = AcpiInstallInterface(list.data[i]); 3631 if (ACPI_FAILURE(status)) 3632 device_printf(dev, 3633 "failed to install _OSI(\"%s\"): %s\n", 3634 list.data[i], AcpiFormatException(status)); 3635 else if (bootverbose) 3636 device_printf(dev, "installed _OSI(\"%s\")\n", 3637 list.data[i]); 3638 } 3639 acpi_free_interfaces(&list); 3640 } 3641 if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) { 3642 for (i = 0; i < list.num; i++) { 3643 status = AcpiRemoveInterface(list.data[i]); 3644 if (ACPI_FAILURE(status)) 3645 device_printf(dev, 3646 "failed to remove _OSI(\"%s\"): %s\n", 3647 list.data[i], AcpiFormatException(status)); 3648 else if (bootverbose) 3649 device_printf(dev, "removed _OSI(\"%s\")\n", 3650 list.data[i]); 3651 } 3652 acpi_free_interfaces(&list); 3653 } 3654 } 3655 3656 static int 3657 acpi_pm_func(u_long cmd, void *arg, ...) 
3658 { 3659 int state, acpi_state; 3660 int error; 3661 struct acpi_softc *sc; 3662 va_list ap; 3663 3664 error = 0; 3665 switch (cmd) { 3666 case POWER_CMD_SUSPEND: 3667 sc = (struct acpi_softc *)arg; 3668 if (sc == NULL) { 3669 error = EINVAL; 3670 goto out; 3671 } 3672 3673 va_start(ap, arg); 3674 state = va_arg(ap, int); 3675 va_end(ap); 3676 3677 switch (state) { 3678 case POWER_SLEEP_STATE_STANDBY: 3679 acpi_state = sc->acpi_standby_sx; 3680 break; 3681 case POWER_SLEEP_STATE_SUSPEND: 3682 acpi_state = sc->acpi_suspend_sx; 3683 break; 3684 case POWER_SLEEP_STATE_HIBERNATE: 3685 acpi_state = ACPI_STATE_S4; 3686 break; 3687 default: 3688 error = EINVAL; 3689 goto out; 3690 } 3691 3692 if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state))) 3693 error = ENXIO; 3694 break; 3695 default: 3696 error = EINVAL; 3697 goto out; 3698 } 3699 3700 out: 3701 return (error); 3702 } 3703 3704 static void 3705 acpi_pm_register(void *arg) 3706 { 3707 if (!cold || resource_disabled("acpi", 0)) 3708 return; 3709 3710 power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL); 3711 } 3712 3713 SYSINIT(power, SI_BOOT2_KLD, SI_ORDER_ANY, acpi_pm_register, 0); 3714