/* $OpenBSD: acpi.c,v 1.143 2009/10/26 20:17:26 deraadt Exp $ */
/*
 * Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
 * Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/event.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/workq.h>

#include <machine/conf.h>
#include <machine/cpufunc.h>
#include <machine/bus.h>

#include <dev/pci/pcivar.h>
#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/amltypes.h>
#include <dev/acpi/acpidev.h>
#include <dev/acpi/dsdt.h>

#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>

#include <machine/apmvar.h>
#define APMUNIT(dev)	(minor(dev)&0xf0)
#define APMDEV(dev)	(minor(dev)&0x0f)
#define APMDEV_NORMAL	0
#define APMDEV_CTL	8

#ifdef ACPI_DEBUG
int acpi_debug = 16;
#endif
int acpi_enabled;
int acpi_poll_enabled;
int acpi_hasprocfvs;
int acpi_thinkpad_enabled;

#define ACPIEN_RETRIES 15

void acpi_isr_thread(void *);
void acpi_create_thread(void *);

int acpi_match(struct device *, void *, void *);
void acpi_attach(struct device *, struct device *, void *);
int acpi_submatch(struct device *, void *, void *);
int acpi_print(void *, const char *);

void acpi_map_pmregs(struct acpi_softc *);

int acpi_founddock(struct aml_node *, void *);
int acpi_foundpss(struct aml_node *, void *);
int acpi_foundhid(struct aml_node *, void *);
int acpi_foundec(struct aml_node *, void *);
int acpi_foundtmp(struct aml_node *, void *);
int acpi_foundprt(struct aml_node *, void *);
int acpi_foundprw(struct aml_node *, void *);
int acpi_foundvideo(struct aml_node *, void *);
int acpi_inidev(struct aml_node *, void *);

int acpi_loadtables(struct acpi_softc *, struct acpi_rsdp *);

void acpi_init_states(struct acpi_softc *);
void acpi_init_gpes(struct acpi_softc *);
void acpi_init_pm(struct acpi_softc *);

void acpi_dev_sort(void);
void acpi_dev_free(void);

int acpi_foundide(struct aml_node *node, void *arg);
int acpiide_notify(struct aml_node *, int, void *);

void wdcattach(struct channel_softc *);
int wdcdetach(struct channel_softc *, int);

struct acpi_q *acpi_maptable(paddr_t, const char *, const char *, const char *);

struct idechnl
{
	struct acpi_softc *sc;
	int64_t addr;
	int64_t chnl;
	int64_t sta;
};

int is_ejectable_bay(struct aml_node *node);
int is_ata(struct aml_node *node);
int is_ejectable(struct aml_node *node);
109 110 #ifdef ACPI_SLEEP_ENABLED 111 void acpi_sleep_walk(struct acpi_softc *, int); 112 #endif /* ACPI_SLEEP_ENABLED */ 113 114 #ifndef SMALL_KERNEL 115 int acpi_add_device(struct aml_node *node, void *arg); 116 #endif /* SMALL_KERNEL */ 117 118 void acpi_enable_onegpe(struct acpi_softc *, int, int); 119 int acpi_gpe_level(struct acpi_softc *, int, void *); 120 int acpi_gpe_edge(struct acpi_softc *, int, void *); 121 122 struct gpe_block *acpi_find_gpe(struct acpi_softc *, int); 123 124 #define ACPI_LOCK(sc) 125 #define ACPI_UNLOCK(sc) 126 127 /* XXX move this into dsdt softc at some point */ 128 extern struct aml_node aml_root; 129 130 /* XXX do we need this? */ 131 void acpi_filtdetach(struct knote *); 132 int acpi_filtread(struct knote *, long); 133 134 struct filterops acpiread_filtops = { 135 1, NULL, acpi_filtdetach, acpi_filtread 136 }; 137 138 struct cfattach acpi_ca = { 139 sizeof(struct acpi_softc), acpi_match, acpi_attach 140 }; 141 142 struct cfdriver acpi_cd = { 143 NULL, "acpi", DV_DULL 144 }; 145 146 struct acpi_softc *acpi_softc; 147 int acpi_evindex; 148 149 #define acpi_bus_space_map _bus_space_map 150 #define acpi_bus_space_unmap _bus_space_unmap 151 152 #define pch(x) (((x)>=' ' && (x)<='z') ? (x) : ' ') 153 154 #if 0 155 void 156 acpi_delay(struct acpi_softc *sc, int64_t uSecs) 157 { 158 /* XXX this needs to become a tsleep later */ 159 delay(uSecs); 160 } 161 #endif 162 163 int 164 acpi_gasio(struct acpi_softc *sc, int iodir, int iospace, uint64_t address, 165 int access_size, int len, void *buffer) 166 { 167 u_int8_t *pb; 168 bus_space_handle_t ioh; 169 struct acpi_mem_map mh; 170 pci_chipset_tag_t pc; 171 pcitag_t tag; 172 bus_addr_t ioaddr; 173 int reg, idx, ival, sval; 174 175 dnprintf(50, "gasio: %.2x 0x%.8llx %s\n", 176 iospace, address, (iodir == ACPI_IOWRITE) ? 
"write" : "read"); 177 178 pb = (u_int8_t *)buffer; 179 switch (iospace) { 180 case GAS_SYSTEM_MEMORY: 181 /* copy to/from system memory */ 182 acpi_map(address, len, &mh); 183 if (iodir == ACPI_IOREAD) 184 memcpy(buffer, mh.va, len); 185 else 186 memcpy(mh.va, buffer, len); 187 acpi_unmap(&mh); 188 break; 189 190 case GAS_SYSTEM_IOSPACE: 191 /* read/write from I/O registers */ 192 ioaddr = address; 193 if (acpi_bus_space_map(sc->sc_iot, ioaddr, len, 0, &ioh) != 0) { 194 printf("unable to map iospace\n"); 195 return (-1); 196 } 197 for (reg = 0; reg < len; reg += access_size) { 198 if (iodir == ACPI_IOREAD) { 199 switch (access_size) { 200 case 1: 201 *(uint8_t *)(pb+reg) = bus_space_read_1( 202 sc->sc_iot, ioh, reg); 203 dnprintf(80, "os_in8(%llx) = %x\n", 204 reg+address, *(uint8_t *)(pb+reg)); 205 break; 206 case 2: 207 *(uint16_t *)(pb+reg) = bus_space_read_2( 208 sc->sc_iot, ioh, reg); 209 dnprintf(80, "os_in16(%llx) = %x\n", 210 reg+address, *(uint16_t *)(pb+reg)); 211 break; 212 case 4: 213 *(uint32_t *)(pb+reg) = bus_space_read_4( 214 sc->sc_iot, ioh, reg); 215 break; 216 default: 217 printf("rdio: invalid size %d\n", access_size); 218 break; 219 } 220 } else { 221 switch (access_size) { 222 case 1: 223 bus_space_write_1(sc->sc_iot, ioh, reg, 224 *(uint8_t *)(pb+reg)); 225 dnprintf(80, "os_out8(%llx,%x)\n", 226 reg+address, *(uint8_t *)(pb+reg)); 227 break; 228 case 2: 229 bus_space_write_2(sc->sc_iot, ioh, reg, 230 *(uint16_t *)(pb+reg)); 231 dnprintf(80, "os_out16(%llx,%x)\n", 232 reg+address, *(uint16_t *)(pb+reg)); 233 break; 234 case 4: 235 bus_space_write_4(sc->sc_iot, ioh, reg, 236 *(uint32_t *)(pb+reg)); 237 break; 238 default: 239 printf("wrio: invalid size %d\n", access_size); 240 break; 241 } 242 } 243 244 /* During autoconf some devices are still gathering 245 * information. Delay here to give them an opportunity 246 * to finish. During runtime we simply need to ignore 247 * transient values. 248 */ 249 if (cold) 250 delay(10000); 251 } 252 acpi_bus_space_unmap(sc->sc_iot, ioh, len, &ioaddr); 253 break; 254 255 case GAS_PCI_CFG_SPACE: 256 /* format of address: 257 * bits 00..15 = register 258 * bits 16..31 = function 259 * bits 32..47 = device 260 * bits 48..63 = bus 261 */ 262 pc = NULL; 263 tag = pci_make_tag(pc, 264 ACPI_PCI_BUS(address), ACPI_PCI_DEV(address), 265 ACPI_PCI_FN(address)); 266 267 /* XXX: This is ugly. read-modify-write does a byte at a time */ 268 reg = ACPI_PCI_REG(address); 269 for (idx = reg; idx < reg+len; idx++) { 270 ival = pci_conf_read(pc, tag, idx & ~0x3); 271 if (iodir == ACPI_IOREAD) { 272 *pb = ival >> (8 * (idx & 0x3)); 273 } else { 274 sval = *pb; 275 ival &= ~(0xFF << (8* (idx & 0x3))); 276 ival |= sval << (8* (idx & 0x3)); 277 pci_conf_write(pc, tag, idx & ~0x3, ival); 278 } 279 pb++; 280 } 281 break; 282 case GAS_EMBEDDED: 283 if (sc->sc_ec == NULL) 284 break; 285 #ifndef SMALL_KERNEL 286 if (iodir == ACPI_IOREAD) 287 acpiec_read(sc->sc_ec, (u_int8_t)address, len, buffer); 288 else 289 acpiec_write(sc->sc_ec, (u_int8_t)address, len, buffer); 290 #endif 291 break; 292 } 293 return (0); 294 } 295 296 int 297 acpi_inidev(struct aml_node *node, void *arg) 298 { 299 struct acpi_softc *sc = (struct acpi_softc *)arg; 300 int64_t st; 301 302 /* 303 * Per the ACPI spec 6.5.1, only run _INI when device is there or 304 * when there is no _STA. We terminate the tree walk (with return 1) 305 * early if necessary. 
306 */ 307 308 /* Evaluate _STA to decide _INI fate and walk fate */ 309 if (aml_evalinteger(sc, node->parent, "_STA", 0, NULL, &st)) 310 st = STA_PRESENT | STA_ENABLED | STA_DEV_OK | 0x1000; 311 312 /* Evaluate _INI if we are present */ 313 if (st & STA_PRESENT) 314 aml_evalnode(sc, node, 0, NULL, NULL); 315 316 /* If we are functioning, we walk/search our children */ 317 if(st & STA_DEV_OK) 318 return 0; 319 320 /* If we are not enabled, or not present, terminate search */ 321 if (!(st & (STA_PRESENT|STA_ENABLED))) 322 return 1; 323 324 /* Default just continue search */ 325 return 0; 326 } 327 328 int 329 acpi_foundprt(struct aml_node *node, void *arg) 330 { 331 struct acpi_softc *sc = (struct acpi_softc *)arg; 332 struct device *self = (struct device *)arg; 333 struct acpi_attach_args aaa; 334 int64_t st = 0; 335 336 dnprintf(10, "found prt entry: %s\n", node->parent->name); 337 338 /* Evaluate _STA to decide _PRT fate and walk fate */ 339 if (aml_evalinteger(sc, node->parent, "_STA", 0, NULL, &st)) 340 st = STA_PRESENT | STA_ENABLED | STA_DEV_OK | 0x1000; 341 342 if (st & STA_PRESENT) { 343 memset(&aaa, 0, sizeof(aaa)); 344 aaa.aaa_iot = sc->sc_iot; 345 aaa.aaa_memt = sc->sc_memt; 346 aaa.aaa_node = node; 347 aaa.aaa_name = "acpiprt"; 348 349 config_found(self, &aaa, acpi_print); 350 } 351 352 /* If we are functioning, we walk/search our children */ 353 if(st & STA_DEV_OK) 354 return 0; 355 356 /* If we are not enabled, or not present, terminate search */ 357 if (!(st & (STA_PRESENT|STA_ENABLED))) 358 return 1; 359 360 /* Default just continue search */ 361 return 0; 362 } 363 364 int 365 is_ata(struct aml_node *node) 366 { 367 return (aml_searchname(node, "_GTM") != NULL || 368 aml_searchname(node, "_GTF") != NULL || 369 aml_searchname(node, "_STM") != NULL || 370 aml_searchname(node, "_SDD") != NULL); 371 } 372 373 int 374 is_ejectable(struct aml_node *node) 375 { 376 return (aml_searchname(node, "_EJ0") != NULL); 377 } 378 379 int 380 is_ejectable_bay(struct aml_node *node) 381 { 382 return ((is_ata(node) || is_ata(node->parent)) && is_ejectable(node)); 383 } 384 385 int 386 acpiide_notify(struct aml_node *node, int ntype, void *arg) 387 { 388 struct idechnl *ide = arg; 389 struct acpi_softc *sc = ide->sc; 390 struct pciide_softc *wsc; 391 struct device *dev; 392 int b,d,f; 393 int64_t sta; 394 395 if (aml_evalinteger(sc, node, "_STA", 0, NULL, &sta) != 0) 396 return (0); 397 398 dnprintf(10, "IDE notify! 
%s %d status:%llx\n", aml_nodename(node), 399 ntype, sta); 400 401 /* Walk device list looking for IDE device match */ 402 TAILQ_FOREACH(dev, &alldevs, dv_list) { 403 if (strcmp(dev->dv_cfdata->cf_driver->cd_name, "pciide")) 404 continue; 405 406 wsc = (struct pciide_softc *)dev; 407 pci_decompose_tag(NULL, wsc->sc_tag, &b, &d, &f); 408 if (b != ACPI_PCI_BUS(ide->addr) || 409 d != ACPI_PCI_DEV(ide->addr) || 410 f != ACPI_PCI_FN(ide->addr)) 411 continue; 412 dnprintf(10, "Found pciide: %s %x.%x.%x channel:%llx\n", 413 dev->dv_xname, b,d,f, ide->chnl); 414 415 if (sta == 0 && ide->sta) 416 wdcdetach( 417 &wsc->pciide_channels[ide->chnl].wdc_channel, 0); 418 else if (sta && !ide->sta) 419 wdcattach( 420 &wsc->pciide_channels[ide->chnl].wdc_channel); 421 ide->sta = sta; 422 } 423 return (0); 424 } 425 426 int 427 acpi_foundide(struct aml_node *node, void *arg) 428 { 429 struct acpi_softc *sc = arg; 430 struct aml_node *pp; 431 struct idechnl *ide; 432 union amlpci_t pi; 433 int lvl; 434 435 /* Check if this is an ejectable bay */ 436 if (!is_ejectable_bay(node)) 437 return (0); 438 439 ide = malloc(sizeof(struct idechnl), M_DEVBUF, M_NOWAIT | M_ZERO); 440 ide->sc = sc; 441 442 /* GTM/GTF can be at 2/3 levels: pciX.ideX.channelX[.driveX] */ 443 lvl = 0; 444 for (pp=node->parent; pp; pp=pp->parent) { 445 lvl++; 446 if (aml_searchname(pp, "_HID")) 447 break; 448 } 449 450 /* Get PCI address and channel */ 451 if (lvl == 3) { 452 aml_evalinteger(sc, node->parent, "_ADR", 0, NULL, 453 &ide->chnl); 454 aml_rdpciaddr(node->parent->parent, &pi); 455 ide->addr = pi.addr; 456 } else if (lvl == 4) { 457 aml_evalinteger(sc, node->parent->parent, "_ADR", 0, NULL, 458 &ide->chnl); 459 aml_rdpciaddr(node->parent->parent->parent, &pi); 460 ide->addr = pi.addr; 461 } 462 dnprintf(10, "%s %llx channel:%llx\n", 463 aml_nodename(node), ide->addr, ide->chnl); 464 465 aml_evalinteger(sc, node, "_STA", 0, NULL, &ide->sta); 466 dnprintf(10, "Got Initial STA: %llx\n", ide->sta); 467 468 aml_register_notify(node, "acpiide", acpiide_notify, ide, 0); 469 return (0); 470 } 471 472 int 473 acpi_match(struct device *parent, void *match, void *aux) 474 { 475 struct bios_attach_args *ba = aux; 476 struct cfdata *cf = match; 477 478 /* sanity */ 479 if (strcmp(ba->ba_name, cf->cf_driver->cd_name)) 480 return (0); 481 482 if (!acpi_probe(parent, cf, ba)) 483 return (0); 484 485 return (1); 486 } 487 488 void 489 acpi_attach(struct device *parent, struct device *self, void *aux) 490 { 491 struct bios_attach_args *ba = aux; 492 struct acpi_softc *sc = (struct acpi_softc *)self; 493 struct acpi_mem_map handle; 494 struct acpi_rsdp *rsdp; 495 struct acpi_q *entry; 496 struct acpi_dsdt *p_dsdt; 497 int idx; 498 #ifndef SMALL_KERNEL 499 struct acpi_wakeq *wentry; 500 struct device *dev; 501 struct acpi_ac *ac; 502 struct acpi_bat *bat; 503 #endif /* SMALL_KERNEL */ 504 paddr_t facspa; 505 506 sc->sc_iot = ba->ba_iot; 507 sc->sc_memt = ba->ba_memt; 508 509 if (acpi_map(ba->ba_acpipbase, sizeof(struct acpi_rsdp), &handle)) { 510 printf(": can't map memory\n"); 511 return; 512 } 513 514 rsdp = (struct acpi_rsdp *)handle.va; 515 sc->sc_revision = (int)rsdp->rsdp_revision; 516 printf(": rev %d", sc->sc_revision); 517 518 SIMPLEQ_INIT(&sc->sc_tables); 519 SIMPLEQ_INIT(&sc->sc_wakedevs); 520 521 #ifndef SMALL_KERNEL 522 sc->sc_note = malloc(sizeof(struct klist), M_DEVBUF, M_NOWAIT | M_ZERO); 523 if (sc->sc_note == NULL) { 524 printf(", can't allocate memory\n"); 525 acpi_unmap(&handle); 526 return; 527 } 528 #endif /* SMALL_KERNEL */ 529 
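	/*
	 * Descriptive note (added): the next step copies the SDTs pointed
	 * to by the RSDP into sc_tables via acpi_loadtables() below, and
	 * only then searches that list for the FADT.
	 */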
530 if (acpi_loadtables(sc, rsdp)) { 531 printf(", can't load tables\n"); 532 acpi_unmap(&handle); 533 return; 534 } 535 536 acpi_unmap(&handle); 537 538 /* 539 * Find the FADT 540 */ 541 SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) { 542 if (memcmp(entry->q_table, FADT_SIG, 543 sizeof(FADT_SIG) - 1) == 0) { 544 sc->sc_fadt = entry->q_table; 545 break; 546 } 547 } 548 if (sc->sc_fadt == NULL) { 549 printf(", no FADT\n"); 550 return; 551 } 552 553 /* 554 * Check if we are able to enable ACPI control 555 */ 556 if (!sc->sc_fadt->smi_cmd || 557 (!sc->sc_fadt->acpi_enable && !sc->sc_fadt->acpi_disable)) { 558 printf(", ACPI control unavailable\n"); 559 return; 560 } 561 562 /* 563 * Set up a pointer to the firmware control structure 564 */ 565 if (sc->sc_fadt->hdr_revision < 3 || sc->sc_fadt->x_firmware_ctl == 0) 566 facspa = sc->sc_fadt->firmware_ctl; 567 else 568 facspa = sc->sc_fadt->x_firmware_ctl; 569 570 if (acpi_map(facspa, sizeof(struct acpi_facs), &handle)) 571 printf(" !FACS"); 572 else 573 sc->sc_facs = (struct acpi_facs *)handle.va; 574 575 acpi_enabled = 1; 576 577 /* Create opcode hashtable */ 578 aml_hashopcodes(); 579 580 /* Create Default AML objects */ 581 aml_create_defaultobjects(); 582 583 /* 584 * Load the DSDT from the FADT pointer -- use the 585 * extended (64-bit) pointer if it exists 586 */ 587 if (sc->sc_fadt->hdr_revision < 3 || sc->sc_fadt->x_dsdt == 0) 588 entry = acpi_maptable(sc->sc_fadt->dsdt, NULL, NULL, NULL); 589 else 590 entry = acpi_maptable(sc->sc_fadt->x_dsdt, NULL, NULL, NULL); 591 592 if (entry == NULL) 593 printf(" !DSDT"); 594 SIMPLEQ_INSERT_HEAD(&sc->sc_tables, entry, q_next); 595 596 p_dsdt = entry->q_table; 597 acpi_parse_aml(sc, p_dsdt->aml, p_dsdt->hdr_length - 598 sizeof(p_dsdt->hdr)); 599 600 /* Load SSDT's */ 601 SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) { 602 if (memcmp(entry->q_table, SSDT_SIG, 603 sizeof(SSDT_SIG) - 1) == 0) { 604 p_dsdt = entry->q_table; 605 acpi_parse_aml(sc, p_dsdt->aml, p_dsdt->hdr_length - 606 sizeof(p_dsdt->hdr)); 607 } 608 } 609 610 /* Perform post-parsing fixups */ 611 aml_postparse(); 612 613 #ifndef SMALL_KERNEL 614 /* Find available sleeping states */ 615 acpi_init_states(sc); 616 617 /* Find available sleep/resume related methods. */ 618 acpi_init_pm(sc); 619 #endif /* SMALL_KERNEL */ 620 621 /* Map Power Management registers */ 622 acpi_map_pmregs(sc); 623 624 #ifndef SMALL_KERNEL 625 /* Initialize GPE handlers */ 626 acpi_init_gpes(sc); 627 628 /* some devices require periodic polling */ 629 timeout_set(&sc->sc_dev_timeout, acpi_poll, sc); 630 #endif /* SMALL_KERNEL */ 631 632 /* 633 * Take over ACPI control. 
Note that once we do this, we 634 * effectively tell the system that we have ownership of 635 * the ACPI hardware registers, and that SMI should leave 636 * them alone 637 * 638 * This may prevent thermal control on some systems where 639 * that actually does work 640 */ 641 acpi_write_pmreg(sc, ACPIREG_SMICMD, 0, sc->sc_fadt->acpi_enable); 642 idx = 0; 643 do { 644 if (idx++ > ACPIEN_RETRIES) { 645 printf(", can't enable ACPI\n"); 646 return; 647 } 648 } while (!(acpi_read_pmreg(sc, ACPIREG_PM1_CNT, 0) & ACPI_PM1_SCI_EN)); 649 650 printf("\n%s: tables", DEVNAME(sc)); 651 SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) { 652 printf(" %.4s", entry->q_table); 653 } 654 printf("\n"); 655 656 #ifndef SMALL_KERNEL 657 /* Display wakeup devices and lowest S-state */ 658 printf("%s: wakeup devices", DEVNAME(sc)); 659 SIMPLEQ_FOREACH(wentry, &sc->sc_wakedevs, q_next) { 660 printf(" %.4s(S%d)", wentry->q_node->name, 661 wentry->q_state); 662 } 663 printf("\n"); 664 665 666 /* 667 * ACPI is enabled now -- attach timer 668 */ 669 { 670 struct acpi_attach_args aaa; 671 672 memset(&aaa, 0, sizeof(aaa)); 673 aaa.aaa_name = "acpitimer"; 674 aaa.aaa_iot = sc->sc_iot; 675 aaa.aaa_memt = sc->sc_memt; 676 #if 0 677 aaa.aaa_pcit = sc->sc_pcit; 678 aaa.aaa_smbust = sc->sc_smbust; 679 #endif 680 config_found(self, &aaa, acpi_print); 681 } 682 #endif /* SMALL_KERNEL */ 683 684 /* 685 * Attach table-defined devices 686 */ 687 SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) { 688 struct acpi_attach_args aaa; 689 690 memset(&aaa, 0, sizeof(aaa)); 691 aaa.aaa_iot = sc->sc_iot; 692 aaa.aaa_memt = sc->sc_memt; 693 #if 0 694 aaa.aaa_pcit = sc->sc_pcit; 695 aaa.aaa_smbust = sc->sc_smbust; 696 #endif 697 aaa.aaa_table = entry->q_table; 698 config_found_sm(self, &aaa, acpi_print, acpi_submatch); 699 } 700 701 acpi_softc = sc; 702 703 /* initialize runtime environment */ 704 aml_find_node(&aml_root, "_INI", acpi_inidev, sc); 705 706 /* attach pci interrupt routing tables */ 707 aml_find_node(&aml_root, "_PRT", acpi_foundprt, sc); 708 709 #ifndef SMALL_KERNEL 710 /* XXX EC needs to be attached first on some systems */ 711 aml_find_node(&aml_root, "_HID", acpi_foundec, sc); 712 713 aml_walknodes(&aml_root, AML_WALK_PRE, acpi_add_device, sc); 714 715 /* attach battery, power supply and button devices */ 716 aml_find_node(&aml_root, "_HID", acpi_foundhid, sc); 717 718 /* Attach IDE bay */ 719 aml_walknodes(&aml_root, AML_WALK_PRE, acpi_foundide, sc); 720 721 /* attach docks */ 722 aml_find_node(&aml_root, "_DCK", acpi_founddock, sc); 723 724 /* attach video only if this is not a stinkpad */ 725 if (!acpi_thinkpad_enabled) 726 aml_find_node(&aml_root, "_DOS", acpi_foundvideo, sc); 727 728 /* create list of devices we want to query when APM come in */ 729 SLIST_INIT(&sc->sc_ac); 730 SLIST_INIT(&sc->sc_bat); 731 TAILQ_FOREACH(dev, &alldevs, dv_list) { 732 if (!strcmp(dev->dv_cfdata->cf_driver->cd_name, "acpiac")) { 733 ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO); 734 ac->aac_softc = (struct acpiac_softc *)dev; 735 SLIST_INSERT_HEAD(&sc->sc_ac, ac, aac_link); 736 } else if (!strcmp(dev->dv_cfdata->cf_driver->cd_name, "acpibat")) { 737 bat = malloc(sizeof(*bat), M_DEVBUF, M_WAITOK | M_ZERO); 738 bat->aba_softc = (struct acpibat_softc *)dev; 739 SLIST_INSERT_HEAD(&sc->sc_bat, bat, aba_link); 740 } 741 } 742 743 /* Setup threads */ 744 sc->sc_thread = malloc(sizeof(struct acpi_thread), M_DEVBUF, M_WAITOK); 745 sc->sc_thread->sc = sc; 746 sc->sc_thread->running = 1; 747 748 acpi_attach_machdep(sc); 749 750 
kthread_create_deferred(acpi_create_thread, sc); 751 #endif /* SMALL_KERNEL */ 752 } 753 754 int 755 acpi_submatch(struct device *parent, void *match, void *aux) 756 { 757 struct acpi_attach_args *aaa = (struct acpi_attach_args *)aux; 758 struct cfdata *cf = match; 759 760 if (aaa->aaa_table == NULL) 761 return (0); 762 return ((*cf->cf_attach->ca_match)(parent, match, aux)); 763 } 764 765 int 766 acpi_print(void *aux, const char *pnp) 767 { 768 struct acpi_attach_args *aa = aux; 769 770 if (pnp) { 771 if (aa->aaa_name) 772 printf("%s at %s", aa->aaa_name, pnp); 773 else 774 return (QUIET); 775 } 776 777 return (UNCONF); 778 } 779 780 struct acpi_q * 781 acpi_maptable(paddr_t addr, const char *sig, const char *oem, const char *tbl) 782 { 783 static int tblid; 784 struct acpi_mem_map handle; 785 struct acpi_table_header *hdr; 786 struct acpi_q *entry; 787 size_t len; 788 789 /* Check if we can map address */ 790 if (addr == 0) 791 return NULL; 792 if (acpi_map(addr, sizeof(*hdr), &handle)) 793 return NULL; 794 hdr = (struct acpi_table_header *)handle.va; 795 len = hdr->length; 796 acpi_unmap(&handle); 797 798 /* Validate length/checksum */ 799 if (acpi_map(addr, len, &handle)) 800 return NULL; 801 hdr = (struct acpi_table_header *)handle.va; 802 if (acpi_checksum(hdr, len)) { 803 acpi_unmap(&handle); 804 return NULL; 805 } 806 if ((sig && memcmp(sig, hdr->signature, 4)) || 807 (oem && memcmp(oem, hdr->oemid, 6)) || 808 (tbl && memcmp(tbl, hdr->oemtableid, 8))) { 809 acpi_unmap(&handle); 810 return NULL; 811 } 812 813 /* Allocate copy */ 814 entry = malloc(len + sizeof(*entry), M_DEVBUF, M_NOWAIT); 815 if (entry != NULL) { 816 memcpy(entry->q_data, handle.va, len); 817 entry->q_table = entry->q_data; 818 entry->q_id = ++tblid; 819 } 820 acpi_unmap(&handle); 821 return entry; 822 } 823 824 int 825 acpi_loadtables(struct acpi_softc *sc, struct acpi_rsdp *rsdp) 826 { 827 struct acpi_q *entry, *sdt; 828 int i, ntables; 829 size_t len; 830 831 if (rsdp->rsdp_revision == 2 && rsdp->rsdp_xsdt) { 832 struct acpi_xsdt *xsdt; 833 834 sdt = acpi_maptable(rsdp->rsdp_xsdt, NULL, NULL, NULL); 835 if (sdt == NULL) { 836 printf("couldn't map rsdt\n"); 837 return (ENOMEM); 838 } 839 840 xsdt = (struct acpi_xsdt *)sdt->q_data; 841 len = xsdt->hdr.length; 842 ntables = (len - sizeof(struct acpi_table_header)) / 843 sizeof(xsdt->table_offsets[0]); 844 845 for (i = 0; i < ntables; i++) { 846 entry = acpi_maptable(xsdt->table_offsets[i], NULL, NULL, 847 NULL); 848 if (entry != NULL) 849 SIMPLEQ_INSERT_TAIL(&sc->sc_tables, entry, 850 q_next); 851 } 852 free(sdt, M_DEVBUF); 853 } else { 854 struct acpi_rsdt *rsdt; 855 856 sdt = acpi_maptable(rsdp->rsdp_rsdt, NULL, NULL, NULL); 857 if (sdt == NULL) { 858 printf("couldn't map rsdt\n"); 859 return (ENOMEM); 860 } 861 862 rsdt = (struct acpi_rsdt *)sdt->q_data; 863 len = rsdt->hdr.length; 864 ntables = (len - sizeof(struct acpi_table_header)) / 865 sizeof(rsdt->table_offsets[0]); 866 867 for (i = 0; i < ntables; i++) { 868 entry = acpi_maptable(rsdt->table_offsets[i], NULL, NULL, 869 NULL); 870 if (entry != NULL) 871 SIMPLEQ_INSERT_TAIL(&sc->sc_tables, entry, 872 q_next); 873 } 874 free(sdt, M_DEVBUF); 875 } 876 877 return (0); 878 } 879 880 int 881 acpiopen(dev_t dev, int flag, int mode, struct proc *p) 882 { 883 int error = 0; 884 #ifndef SMALL_KERNEL 885 struct acpi_softc *sc; 886 887 if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 || 888 !(sc = acpi_cd.cd_devs[APMUNIT(dev)])) 889 return (ENXIO); 890 891 switch (APMDEV(dev)) { 892 case APMDEV_CTL: 893 if (!(flag & 
FWRITE)) { 894 error = EINVAL; 895 break; 896 } 897 break; 898 case APMDEV_NORMAL: 899 if (!(flag & FREAD) || (flag & FWRITE)) { 900 error = EINVAL; 901 break; 902 } 903 break; 904 default: 905 error = ENXIO; 906 break; 907 } 908 #else 909 error = ENXIO; 910 #endif 911 return (error); 912 } 913 914 int 915 acpiclose(dev_t dev, int flag, int mode, struct proc *p) 916 { 917 int error = 0; 918 #ifndef SMALL_KERNEL 919 struct acpi_softc *sc; 920 921 if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 || 922 !(sc = acpi_cd.cd_devs[APMUNIT(dev)])) 923 return (ENXIO); 924 925 switch (APMDEV(dev)) { 926 case APMDEV_CTL: 927 case APMDEV_NORMAL: 928 break; 929 default: 930 error = ENXIO; 931 break; 932 } 933 #else 934 error = ENXIO; 935 #endif 936 return (error); 937 } 938 939 int 940 acpiioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) 941 { 942 int error = 0; 943 #ifndef SMALL_KERNEL 944 struct acpi_softc *sc; 945 struct acpi_ac *ac; 946 struct acpi_bat *bat; 947 struct apm_power_info *pi = (struct apm_power_info *)data; 948 int bats; 949 unsigned int remaining, rem, minutes, rate; 950 951 if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 || 952 !(sc = acpi_cd.cd_devs[APMUNIT(dev)])) 953 return (ENXIO); 954 955 ACPI_LOCK(sc); 956 /* fake APM */ 957 switch (cmd) { 958 #ifdef ACPI_SLEEP_ENABLED 959 case APM_IOC_STANDBY_REQ: 960 case APM_IOC_SUSPEND_REQ: 961 case APM_IOC_SUSPEND: 962 case APM_IOC_STANDBY: 963 workq_add_task(NULL, 0, (workq_fn)acpi_sleep_state, 964 acpi_softc, (void *)ACPI_STATE_S3); 965 break; 966 #endif /* ACPI_SLEEP_ENABLED */ 967 case APM_IOC_GETPOWER: 968 /* A/C */ 969 pi->ac_state = APM_AC_UNKNOWN; 970 SLIST_FOREACH(ac, &sc->sc_ac, aac_link) { 971 if (ac->aac_softc->sc_ac_stat == PSR_ONLINE) 972 pi->ac_state = APM_AC_ON; 973 else if (ac->aac_softc->sc_ac_stat == PSR_OFFLINE) 974 if (pi->ac_state == APM_AC_UNKNOWN) 975 pi->ac_state = APM_AC_OFF; 976 } 977 978 /* battery */ 979 pi->battery_state = APM_BATT_UNKNOWN; 980 pi->battery_life = 0; 981 pi->minutes_left = 0; 982 bats = 0; 983 remaining = rem = 0; 984 minutes = 0; 985 rate = 0; 986 SLIST_FOREACH(bat, &sc->sc_bat, aba_link) { 987 if (bat->aba_softc->sc_bat_present == 0) 988 continue; 989 990 if (bat->aba_softc->sc_bif.bif_last_capacity == 0) 991 continue; 992 993 bats++; 994 rem = (bat->aba_softc->sc_bst.bst_capacity * 100) / 995 bat->aba_softc->sc_bif.bif_last_capacity; 996 if (rem > 100) 997 rem = 100; 998 remaining += rem; 999 1000 if (bat->aba_softc->sc_bst.bst_rate == BST_UNKNOWN) 1001 continue; 1002 else if (bat->aba_softc->sc_bst.bst_rate > 1) 1003 rate = bat->aba_softc->sc_bst.bst_rate; 1004 1005 minutes += bat->aba_softc->sc_bst.bst_capacity; 1006 } 1007 1008 if (bats == 0) { 1009 pi->battery_state = APM_BATTERY_ABSENT; 1010 pi->battery_life = 0; 1011 pi->minutes_left = (unsigned int)-1; 1012 break; 1013 } 1014 1015 if (pi->ac_state == APM_AC_ON || rate == 0) 1016 pi->minutes_left = (unsigned int)-1; 1017 else 1018 pi->minutes_left = 100 * minutes / rate; 1019 1020 /* running on battery */ 1021 pi->battery_life = remaining / bats; 1022 if (pi->battery_life > 50) 1023 pi->battery_state = APM_BATT_HIGH; 1024 else if (pi->battery_life > 25) 1025 pi->battery_state = APM_BATT_LOW; 1026 else 1027 pi->battery_state = APM_BATT_CRITICAL; 1028 1029 break; 1030 1031 default: 1032 error = ENOTTY; 1033 } 1034 1035 ACPI_UNLOCK(sc); 1036 #else 1037 error = ENXIO; 1038 #endif /* SMALL_KERNEL */ 1039 return (error); 1040 } 1041 1042 void 1043 acpi_filtdetach(struct knote *kn) 1044 { 1045 #ifndef SMALL_KERNEL 1046 struct 
acpi_softc *sc = kn->kn_hook; 1047 1048 ACPI_LOCK(sc); 1049 SLIST_REMOVE(sc->sc_note, kn, knote, kn_selnext); 1050 ACPI_UNLOCK(sc); 1051 #endif 1052 } 1053 1054 int 1055 acpi_filtread(struct knote *kn, long hint) 1056 { 1057 #ifndef SMALL_KERNEL 1058 /* XXX weird kqueue_scan() semantics */ 1059 if (hint & !kn->kn_data) 1060 kn->kn_data = hint; 1061 #endif 1062 return (1); 1063 } 1064 1065 int 1066 acpikqfilter(dev_t dev, struct knote *kn) 1067 { 1068 #ifndef SMALL_KERNEL 1069 struct acpi_softc *sc; 1070 1071 if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 || 1072 !(sc = acpi_cd.cd_devs[APMUNIT(dev)])) 1073 return (ENXIO); 1074 1075 switch (kn->kn_filter) { 1076 case EVFILT_READ: 1077 kn->kn_fop = &acpiread_filtops; 1078 break; 1079 default: 1080 return (1); 1081 } 1082 1083 kn->kn_hook = sc; 1084 1085 ACPI_LOCK(sc); 1086 SLIST_INSERT_HEAD(sc->sc_note, kn, kn_selnext); 1087 ACPI_UNLOCK(sc); 1088 1089 return (0); 1090 #else 1091 return (1); 1092 #endif 1093 } 1094 1095 /* Read from power management register */ 1096 int 1097 acpi_read_pmreg(struct acpi_softc *sc, int reg, int offset) 1098 { 1099 bus_space_handle_t ioh; 1100 bus_size_t size, __size; 1101 int regval; 1102 1103 __size = 0; 1104 /* Special cases: 1A/1B blocks can be OR'ed together */ 1105 switch (reg) { 1106 case ACPIREG_PM1_EN: 1107 return (acpi_read_pmreg(sc, ACPIREG_PM1A_EN, offset) | 1108 acpi_read_pmreg(sc, ACPIREG_PM1B_EN, offset)); 1109 case ACPIREG_PM1_STS: 1110 return (acpi_read_pmreg(sc, ACPIREG_PM1A_STS, offset) | 1111 acpi_read_pmreg(sc, ACPIREG_PM1B_STS, offset)); 1112 case ACPIREG_PM1_CNT: 1113 return (acpi_read_pmreg(sc, ACPIREG_PM1A_CNT, offset) | 1114 acpi_read_pmreg(sc, ACPIREG_PM1B_CNT, offset)); 1115 case ACPIREG_GPE_STS: 1116 __size = 1; 1117 dnprintf(50, "read GPE_STS offset: %.2x %.2x %.2x\n", offset, 1118 sc->sc_fadt->gpe0_blk_len>>1, sc->sc_fadt->gpe1_blk_len>>1); 1119 if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) { 1120 reg = ACPIREG_GPE0_STS; 1121 } 1122 break; 1123 case ACPIREG_GPE_EN: 1124 __size = 1; 1125 dnprintf(50, "read GPE_EN offset: %.2x %.2x %.2x\n", 1126 offset, sc->sc_fadt->gpe0_blk_len>>1, 1127 sc->sc_fadt->gpe1_blk_len>>1); 1128 if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) { 1129 reg = ACPIREG_GPE0_EN; 1130 } 1131 break; 1132 } 1133 1134 if (reg >= ACPIREG_MAXREG || sc->sc_pmregs[reg].size == 0) 1135 return (0); 1136 1137 regval = 0; 1138 ioh = sc->sc_pmregs[reg].ioh; 1139 size = sc->sc_pmregs[reg].size; 1140 if (__size) 1141 size = __size; 1142 if (size > 4) 1143 size = 4; 1144 1145 switch (size) { 1146 case 1: 1147 regval = bus_space_read_1(sc->sc_iot, ioh, offset); 1148 break; 1149 case 2: 1150 regval = bus_space_read_2(sc->sc_iot, ioh, offset); 1151 break; 1152 case 4: 1153 regval = bus_space_read_4(sc->sc_iot, ioh, offset); 1154 break; 1155 } 1156 1157 dnprintf(30, "acpi_readpm: %s = %.4x:%.4x %x\n", 1158 sc->sc_pmregs[reg].name, 1159 sc->sc_pmregs[reg].addr, offset, regval); 1160 return (regval); 1161 } 1162 1163 /* Write to power management register */ 1164 void 1165 acpi_write_pmreg(struct acpi_softc *sc, int reg, int offset, int regval) 1166 { 1167 bus_space_handle_t ioh; 1168 bus_size_t size, __size; 1169 1170 __size = 0; 1171 /* Special cases: 1A/1B blocks can be written with same value */ 1172 switch (reg) { 1173 case ACPIREG_PM1_EN: 1174 acpi_write_pmreg(sc, ACPIREG_PM1A_EN, offset, regval); 1175 acpi_write_pmreg(sc, ACPIREG_PM1B_EN, offset, regval); 1176 break; 1177 case ACPIREG_PM1_STS: 1178 acpi_write_pmreg(sc, ACPIREG_PM1A_STS, offset, regval); 1179 acpi_write_pmreg(sc, 
ACPIREG_PM1B_STS, offset, regval); 1180 break; 1181 case ACPIREG_PM1_CNT: 1182 acpi_write_pmreg(sc, ACPIREG_PM1A_CNT, offset, regval); 1183 acpi_write_pmreg(sc, ACPIREG_PM1B_CNT, offset, regval); 1184 break; 1185 case ACPIREG_GPE_STS: 1186 __size = 1; 1187 dnprintf(50, "write GPE_STS offset: %.2x %.2x %.2x %.2x\n", 1188 offset, sc->sc_fadt->gpe0_blk_len>>1, 1189 sc->sc_fadt->gpe1_blk_len>>1, regval); 1190 if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) { 1191 reg = ACPIREG_GPE0_STS; 1192 } 1193 break; 1194 case ACPIREG_GPE_EN: 1195 __size = 1; 1196 dnprintf(50, "write GPE_EN offset: %.2x %.2x %.2x %.2x\n", 1197 offset, sc->sc_fadt->gpe0_blk_len>>1, 1198 sc->sc_fadt->gpe1_blk_len>>1, regval); 1199 if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) { 1200 reg = ACPIREG_GPE0_EN; 1201 } 1202 break; 1203 } 1204 1205 /* All special case return here */ 1206 if (reg >= ACPIREG_MAXREG) 1207 return; 1208 1209 ioh = sc->sc_pmregs[reg].ioh; 1210 size = sc->sc_pmregs[reg].size; 1211 if (__size) 1212 size = __size; 1213 if (size > 4) 1214 size = 4; 1215 switch (size) { 1216 case 1: 1217 bus_space_write_1(sc->sc_iot, ioh, offset, regval); 1218 break; 1219 case 2: 1220 bus_space_write_2(sc->sc_iot, ioh, offset, regval); 1221 break; 1222 case 4: 1223 bus_space_write_4(sc->sc_iot, ioh, offset, regval); 1224 break; 1225 } 1226 1227 dnprintf(30, "acpi_writepm: %s = %.4x:%.4x %x\n", 1228 sc->sc_pmregs[reg].name, sc->sc_pmregs[reg].addr, offset, regval); 1229 } 1230 1231 /* Map Power Management registers */ 1232 void 1233 acpi_map_pmregs(struct acpi_softc *sc) 1234 { 1235 bus_addr_t addr; 1236 bus_size_t size; 1237 const char *name; 1238 int reg; 1239 1240 for (reg = 0; reg < ACPIREG_MAXREG; reg++) { 1241 size = 0; 1242 switch (reg) { 1243 case ACPIREG_SMICMD: 1244 name = "smi"; 1245 size = 1; 1246 addr = sc->sc_fadt->smi_cmd; 1247 break; 1248 case ACPIREG_PM1A_STS: 1249 case ACPIREG_PM1A_EN: 1250 name = "pm1a_sts"; 1251 size = sc->sc_fadt->pm1_evt_len >> 1; 1252 addr = sc->sc_fadt->pm1a_evt_blk; 1253 if (reg == ACPIREG_PM1A_EN && addr) { 1254 addr += size; 1255 name = "pm1a_en"; 1256 } 1257 break; 1258 case ACPIREG_PM1A_CNT: 1259 name = "pm1a_cnt"; 1260 size = sc->sc_fadt->pm1_cnt_len; 1261 addr = sc->sc_fadt->pm1a_cnt_blk; 1262 break; 1263 case ACPIREG_PM1B_STS: 1264 case ACPIREG_PM1B_EN: 1265 name = "pm1b_sts"; 1266 size = sc->sc_fadt->pm1_evt_len >> 1; 1267 addr = sc->sc_fadt->pm1b_evt_blk; 1268 if (reg == ACPIREG_PM1B_EN && addr) { 1269 addr += size; 1270 name = "pm1b_en"; 1271 } 1272 break; 1273 case ACPIREG_PM1B_CNT: 1274 name = "pm1b_cnt"; 1275 size = sc->sc_fadt->pm1_cnt_len; 1276 addr = sc->sc_fadt->pm1b_cnt_blk; 1277 break; 1278 case ACPIREG_PM2_CNT: 1279 name = "pm2_cnt"; 1280 size = sc->sc_fadt->pm2_cnt_len; 1281 addr = sc->sc_fadt->pm2_cnt_blk; 1282 break; 1283 #if 0 1284 case ACPIREG_PM_TMR: 1285 /* Allocated in acpitimer */ 1286 name = "pm_tmr"; 1287 size = sc->sc_fadt->pm_tmr_len; 1288 addr = sc->sc_fadt->pm_tmr_blk; 1289 break; 1290 #endif 1291 case ACPIREG_GPE0_STS: 1292 case ACPIREG_GPE0_EN: 1293 name = "gpe0_sts"; 1294 size = sc->sc_fadt->gpe0_blk_len >> 1; 1295 addr = sc->sc_fadt->gpe0_blk; 1296 1297 dnprintf(20, "gpe0 block len : %x\n", 1298 sc->sc_fadt->gpe0_blk_len >> 1); 1299 dnprintf(20, "gpe0 block addr: %x\n", 1300 sc->sc_fadt->gpe0_blk); 1301 if (reg == ACPIREG_GPE0_EN && addr) { 1302 addr += size; 1303 name = "gpe0_en"; 1304 } 1305 break; 1306 case ACPIREG_GPE1_STS: 1307 case ACPIREG_GPE1_EN: 1308 name = "gpe1_sts"; 1309 size = sc->sc_fadt->gpe1_blk_len >> 1; 1310 addr = 
sc->sc_fadt->gpe1_blk; 1311 1312 dnprintf(20, "gpe1 block len : %x\n", 1313 sc->sc_fadt->gpe1_blk_len >> 1); 1314 dnprintf(20, "gpe1 block addr: %x\n", 1315 sc->sc_fadt->gpe1_blk); 1316 if (reg == ACPIREG_GPE1_EN && addr) { 1317 addr += size; 1318 name = "gpe1_en"; 1319 } 1320 break; 1321 } 1322 if (size && addr) { 1323 dnprintf(50, "mapping: %.4x %.4x %s\n", 1324 addr, size, name); 1325 1326 /* Size and address exist; map register space */ 1327 bus_space_map(sc->sc_iot, addr, size, 0, 1328 &sc->sc_pmregs[reg].ioh); 1329 1330 sc->sc_pmregs[reg].name = name; 1331 sc->sc_pmregs[reg].size = size; 1332 sc->sc_pmregs[reg].addr = addr; 1333 } 1334 } 1335 } 1336 1337 /* move all stuff that doesn't go on the boot media in here */ 1338 #ifndef SMALL_KERNEL 1339 void 1340 acpi_reset(void) 1341 { 1342 struct acpi_fadt *fadt; 1343 u_int32_t reset_as, reset_len; 1344 u_int32_t value; 1345 1346 fadt = acpi_softc->sc_fadt; 1347 1348 /* 1349 * RESET_REG_SUP is not properly set in some implementations, 1350 * but not testing against it breaks more machines than it fixes 1351 */ 1352 if (acpi_softc->sc_revision <= 1 || 1353 !(fadt->flags & FADT_RESET_REG_SUP) || fadt->reset_reg.address == 0) 1354 return; 1355 1356 value = fadt->reset_value; 1357 1358 reset_as = fadt->reset_reg.register_bit_width / 8; 1359 if (reset_as == 0) 1360 reset_as = 1; 1361 1362 reset_len = fadt->reset_reg.access_size; 1363 if (reset_len == 0) 1364 reset_len = reset_as; 1365 1366 acpi_gasio(acpi_softc, ACPI_IOWRITE, 1367 fadt->reset_reg.address_space_id, 1368 fadt->reset_reg.address, reset_as, reset_len, &value); 1369 1370 delay(100000); 1371 } 1372 1373 int 1374 acpi_interrupt(void *arg) 1375 { 1376 struct acpi_softc *sc = (struct acpi_softc *)arg; 1377 u_int32_t processed, sts, en, idx, jdx; 1378 1379 processed = 0; 1380 1381 #if 0 1382 acpi_add_gpeblock(sc, sc->sc_fadt->gpe0_blk, sc->sc_fadt->gpe0_blk_len>>1, 0); 1383 acpi_add_gpeblock(sc, sc->sc_fadt->gpe1_blk, sc->sc_fadt->gpe1_blk_len>>1, 1384 sc->sc_fadt->gpe1_base); 1385 #endif 1386 1387 dnprintf(40, "ACPI Interrupt\n"); 1388 for (idx = 0; idx < sc->sc_lastgpe; idx += 8) { 1389 sts = acpi_read_pmreg(sc, ACPIREG_GPE_STS, idx>>3); 1390 en = acpi_read_pmreg(sc, ACPIREG_GPE_EN, idx>>3); 1391 if (en & sts) { 1392 dnprintf(10, "GPE block: %.2x %.2x %.2x\n", idx, sts, 1393 en); 1394 acpi_write_pmreg(sc, ACPIREG_GPE_EN, idx>>3, en & ~sts); 1395 for (jdx = 0; jdx < 8; jdx++) { 1396 if (en & sts & (1L << jdx)) { 1397 /* Signal this GPE */ 1398 sc->gpe_table[idx+jdx].active = 1; 1399 processed = 1; 1400 } 1401 } 1402 } 1403 } 1404 1405 sts = acpi_read_pmreg(sc, ACPIREG_PM1_STS, 0); 1406 en = acpi_read_pmreg(sc, ACPIREG_PM1_EN, 0); 1407 if (sts & en) { 1408 dnprintf(10,"GEN interrupt: %.4x\n", sts & en); 1409 acpi_write_pmreg(sc, ACPIREG_PM1_EN, 0, en & ~sts); 1410 acpi_write_pmreg(sc, ACPIREG_PM1_STS, 0, en); 1411 acpi_write_pmreg(sc, ACPIREG_PM1_EN, 0, en); 1412 if (sts & ACPI_PM1_PWRBTN_STS) 1413 sc->sc_powerbtn = 1; 1414 if (sts & ACPI_PM1_SLPBTN_STS) 1415 sc->sc_sleepbtn = 1; 1416 processed = 1; 1417 } 1418 1419 if (processed) { 1420 sc->sc_wakeup = 0; 1421 wakeup(sc); 1422 } 1423 1424 return (processed); 1425 } 1426 1427 int 1428 acpi_add_device(struct aml_node *node, void *arg) 1429 { 1430 static int nacpicpus = 0; 1431 struct device *self = arg; 1432 struct acpi_softc *sc = arg; 1433 struct acpi_attach_args aaa; 1434 #ifdef MULTIPROCESSOR 1435 struct aml_value res; 1436 int proc_id = -1; 1437 #endif 1438 1439 memset(&aaa, 0, sizeof(aaa)); 1440 aaa.aaa_node = node; 1441 
aaa.aaa_iot = sc->sc_iot; 1442 aaa.aaa_memt = sc->sc_memt; 1443 if (node == NULL || node->value == NULL) 1444 return 0; 1445 1446 switch (node->value->type) { 1447 case AML_OBJTYPE_PROCESSOR: 1448 if (nacpicpus >= ncpus) 1449 return 0; 1450 #ifdef MULTIPROCESSOR 1451 if (aml_evalnode(sc, aaa.aaa_node, 0, NULL, &res) == 0) { 1452 if (res.type == AML_OBJTYPE_PROCESSOR) 1453 proc_id = res.v_processor.proc_id; 1454 aml_freevalue(&res); 1455 } 1456 if (proc_id < -1 || proc_id >= LAPIC_MAP_SIZE || 1457 (acpi_lapic_flags[proc_id] & ACPI_PROC_ENABLE) == 0) 1458 return 0; 1459 #endif 1460 nacpicpus++; 1461 1462 aaa.aaa_name = "acpicpu"; 1463 break; 1464 case AML_OBJTYPE_THERMZONE: 1465 aaa.aaa_name = "acpitz"; 1466 break; 1467 case AML_OBJTYPE_POWERRSRC: 1468 aaa.aaa_name = "acpipwrres"; 1469 break; 1470 default: 1471 return 0; 1472 } 1473 config_found(self, &aaa, acpi_print); 1474 return 0; 1475 } 1476 1477 void 1478 acpi_enable_onegpe(struct acpi_softc *sc, int gpe, int enable) 1479 { 1480 uint8_t mask = (1L << (gpe & 7)); 1481 uint8_t en; 1482 1483 /* Read enabled register */ 1484 en = acpi_read_pmreg(sc, ACPIREG_GPE_EN, gpe>>3); 1485 dnprintf(50, "%sabling GPE %.2x (current: %sabled) %.2x\n", 1486 enable ? "en" : "dis", gpe, (en & mask) ? "en" : "dis", en); 1487 if (enable) 1488 en |= mask; 1489 else 1490 en &= ~mask; 1491 acpi_write_pmreg(sc, ACPIREG_GPE_EN, gpe>>3, en); 1492 } 1493 1494 int 1495 acpi_set_gpehandler(struct acpi_softc *sc, int gpe, int (*handler) 1496 (struct acpi_softc *, int, void *), void *arg, const char *label) 1497 { 1498 struct gpe_block *ptbl; 1499 1500 ptbl = acpi_find_gpe(sc, gpe); 1501 if (ptbl == NULL || handler == NULL) 1502 return -EINVAL; 1503 if (ptbl->handler != NULL) { 1504 dnprintf(10, "error: GPE %.2x already enabled\n", gpe); 1505 return -EBUSY; 1506 } 1507 dnprintf(50, "Adding GPE handler %.2x (%s)\n", gpe, label); 1508 ptbl->handler = handler; 1509 ptbl->arg = arg; 1510 1511 return (0); 1512 } 1513 1514 int 1515 acpi_gpe_level(struct acpi_softc *sc, int gpe, void *arg) 1516 { 1517 struct aml_node *node = arg; 1518 uint8_t mask; 1519 1520 dnprintf(10, "handling Level-sensitive GPE %.2x\n", gpe); 1521 mask = (1L << (gpe & 7)); 1522 1523 aml_evalnode(sc, node, 0, NULL, NULL); 1524 acpi_write_pmreg(sc, ACPIREG_GPE_STS, gpe>>3, mask); 1525 acpi_write_pmreg(sc, ACPIREG_GPE_EN, gpe>>3, mask); 1526 1527 return (0); 1528 } 1529 1530 int 1531 acpi_gpe_edge(struct acpi_softc *sc, int gpe, void *arg) 1532 { 1533 1534 struct aml_node *node = arg; 1535 uint8_t mask; 1536 1537 dnprintf(10, "handling Edge-sensitive GPE %.2x\n", gpe); 1538 mask = (1L << (gpe & 7)); 1539 1540 aml_evalnode(sc, node, 0, NULL, NULL); 1541 acpi_write_pmreg(sc, ACPIREG_GPE_STS, gpe>>3, mask); 1542 acpi_write_pmreg(sc, ACPIREG_GPE_EN, gpe>>3, mask); 1543 1544 return (0); 1545 } 1546 1547 /* Discover Devices that can wakeup the system 1548 * _PRW returns a package 1549 * pkg[0] = integer (FADT gpe bit) or package (gpe block,gpe bit) 1550 * pkg[1] = lowest sleep state 1551 * pkg[2+] = power resource devices (optional) 1552 * 1553 * To enable wakeup devices: 1554 * Evaluate _ON method in each power resource device 1555 * Evaluate _PSW method 1556 */ 1557 int 1558 acpi_foundprw(struct aml_node *node, void *arg) 1559 { 1560 struct acpi_softc *sc = arg; 1561 struct acpi_wakeq *wq; 1562 1563 wq = malloc(sizeof(struct acpi_wakeq), M_DEVBUF, M_NOWAIT | M_ZERO); 1564 if (wq == NULL) { 1565 return 0; 1566 } 1567 1568 wq->q_wakepkg = malloc(sizeof(struct aml_value), M_DEVBUF, 1569 M_NOWAIT | M_ZERO); 1570 
if (wq->q_wakepkg == NULL) { 1571 free(wq, M_DEVBUF); 1572 return 0; 1573 } 1574 dnprintf(10, "Found _PRW (%s)\n", node->parent->name); 1575 aml_evalnode(sc, node, 0, NULL, wq->q_wakepkg); 1576 wq->q_node = node->parent; 1577 wq->q_gpe = -1; 1578 1579 /* Get GPE of wakeup device, and lowest sleep level */ 1580 if (wq->q_wakepkg->type == AML_OBJTYPE_PACKAGE && wq->q_wakepkg->length >= 2) { 1581 if (wq->q_wakepkg->v_package[0]->type == AML_OBJTYPE_INTEGER) { 1582 wq->q_gpe = wq->q_wakepkg->v_package[0]->v_integer; 1583 } 1584 if (wq->q_wakepkg->v_package[1]->type == AML_OBJTYPE_INTEGER) { 1585 wq->q_state = wq->q_wakepkg->v_package[1]->v_integer; 1586 } 1587 } 1588 SIMPLEQ_INSERT_TAIL(&sc->sc_wakedevs, wq, q_next); 1589 return 0; 1590 } 1591 1592 struct gpe_block * 1593 acpi_find_gpe(struct acpi_softc *sc, int gpe) 1594 { 1595 #if 1 1596 if (gpe >= sc->sc_lastgpe) 1597 return NULL; 1598 return &sc->gpe_table[gpe]; 1599 #else 1600 SIMPLEQ_FOREACH(pgpe, &sc->sc_gpes, gpe_link) { 1601 if (gpe >= pgpe->start && gpe <= (pgpe->start+7)) 1602 return &pgpe->table[gpe & 7]; 1603 } 1604 return NULL; 1605 #endif 1606 } 1607 1608 #if 0 1609 /* New GPE handling code: Create GPE block */ 1610 void 1611 acpi_init_gpeblock(struct acpi_softc *sc, int reg, int len, int base) 1612 { 1613 int i, j; 1614 1615 if (!reg || !len) 1616 return; 1617 for (i=0; i<len; i++) { 1618 pgpe = acpi_os_malloc(sizeof(gpeblock)); 1619 if (pgpe == NULL) 1620 return; 1621 1622 /* Allocate GPE Handler Block */ 1623 pgpe->start = base + i; 1624 acpi_bus_space_map(sc->sc_iot, reg+i, 1, 0, &pgpe->sts_ioh); 1625 acpi_bus_space_map(sc->sc_iot, reg+i+len, 1, 0, &pgpe->en_ioh); 1626 SIMPLEQ_INSERT_TAIL(&sc->sc_gpes, gpe, gpe_link); 1627 1628 /* Clear pending GPEs */ 1629 bus_space_write_1(sc->sc_iot, pgpe->sts_ioh, 0, 0xFF); 1630 bus_space_write_1(sc->sc_iot, pgpe->en_ioh, 0, 0x00); 1631 } 1632 1633 /* Search for GPE handlers */ 1634 for (i=0; i<len*8; i++) { 1635 char gpestr[32]; 1636 struct aml_node *h; 1637 1638 snprintf(gpestr, sizeof(gpestr), "\\_GPE._L%.2X", base+i); 1639 h = aml_searchnode(&aml_root, gpestr); 1640 if (acpi_set_gpehandler(sc, base+i, acpi_gpe_level, h, "level") != 0) { 1641 snprintf(gpestr, sizeof(gpestr), "\\_GPE._E%.2X", base+i); 1642 h = aml_searchnode(&aml_root, gpestr); 1643 acpi_set_gpehandler(sc, base+i, acpi_gpe_edge, h, "edge"); 1644 } 1645 } 1646 } 1647 1648 /* Process GPE interrupts */ 1649 int 1650 acpi_handle_gpes(struct acpi_softc *sc) 1651 { 1652 uint8_t en, sts; 1653 int processed, i; 1654 1655 processed=0; 1656 SIMPLEQ_FOREACH(pgpe, &sc->sc_gpes, gpe_link) { 1657 sts = bus_space_read_1(sc->sc_iot, pgpe->sts_ioh, 0); 1658 en = bus_space_read_1(sc->sc_iot, pgpe->en_ioh, 0); 1659 for (i=0; i<8; i++) { 1660 if (en & sts & (1L << i)) { 1661 pgpe->table[i].active = 1; 1662 processed=1; 1663 } 1664 } 1665 } 1666 return processed; 1667 } 1668 #endif 1669 1670 #if 0 1671 void 1672 acpi_add_gpeblock(struct acpi_softc *sc, int reg, int len, int gpe) 1673 { 1674 int idx, jdx; 1675 u_int8_t en, sts; 1676 1677 if (!reg || !len) 1678 return; 1679 for (idx=0; idx<len; idx++) { 1680 sts = inb(reg + idx); 1681 en = inb(reg + len + idx); 1682 printf("-- gpe %.2x-%.2x : en:%.2x sts:%.2x %.2x\n", 1683 gpe+idx*8, gpe+idx*8+7, en, sts, en&sts); 1684 for (jdx=0; jdx<8; jdx++) { 1685 char gpestr[32]; 1686 struct aml_node *l, *e; 1687 1688 if (en & sts & (1L << jdx)) { 1689 snprintf(gpestr,sizeof(gpestr), "\\_GPE._L%.2X", gpe+idx*8+jdx); 1690 l = aml_searchname(&aml_root, gpestr); 1691 snprintf(gpestr,sizeof(gpestr), 
"\\_GPE._E%.2X", gpe+idx*8+jdx); 1692 e = aml_searchname(&aml_root, gpestr); 1693 printf(" GPE %.2x active L%x E%x\n", gpe+idx*8+jdx, l, e); 1694 } 1695 } 1696 } 1697 } 1698 #endif 1699 1700 void 1701 acpi_init_gpes(struct acpi_softc *sc) 1702 { 1703 struct aml_node *gpe; 1704 char name[12]; 1705 int idx, ngpe; 1706 1707 #if 0 1708 acpi_add_gpeblock(sc, sc->sc_fadt->gpe0_blk, sc->sc_fadt->gpe0_blk_len>>1, 0); 1709 acpi_add_gpeblock(sc, sc->sc_fadt->gpe1_blk, sc->sc_fadt->gpe1_blk_len>>1, 1710 sc->sc_fadt->gpe1_base); 1711 #endif 1712 1713 sc->sc_lastgpe = sc->sc_fadt->gpe0_blk_len << 2; 1714 if (sc->sc_fadt->gpe1_blk_len) { 1715 } 1716 dnprintf(50, "Last GPE: %.2x\n", sc->sc_lastgpe); 1717 1718 /* Allocate GPE table */ 1719 sc->gpe_table = malloc(sc->sc_lastgpe * sizeof(struct gpe_block), 1720 M_DEVBUF, M_WAITOK | M_ZERO); 1721 1722 ngpe = 0; 1723 1724 /* Clear GPE status */ 1725 for (idx = 0; idx < sc->sc_lastgpe; idx += 8) { 1726 acpi_write_pmreg(sc, ACPIREG_GPE_EN, idx>>3, 0); 1727 acpi_write_pmreg(sc, ACPIREG_GPE_STS, idx>>3, -1); 1728 } 1729 for (idx = 0; idx < sc->sc_lastgpe; idx++) { 1730 /* Search Level-sensitive GPES */ 1731 snprintf(name, sizeof(name), "\\_GPE._L%.2X", idx); 1732 gpe = aml_searchname(&aml_root, name); 1733 if (gpe != NULL) 1734 acpi_set_gpehandler(sc, idx, acpi_gpe_level, gpe, 1735 "level"); 1736 if (gpe == NULL) { 1737 /* Search Edge-sensitive GPES */ 1738 snprintf(name, sizeof(name), "\\_GPE._E%.2X", idx); 1739 gpe = aml_searchname(&aml_root, name); 1740 if (gpe != NULL) 1741 acpi_set_gpehandler(sc, idx, acpi_gpe_edge, gpe, 1742 "edge"); 1743 } 1744 } 1745 aml_find_node(&aml_root, "_PRW", acpi_foundprw, sc); 1746 sc->sc_maxgpe = ngpe; 1747 } 1748 1749 void 1750 acpi_init_states(struct acpi_softc *sc) 1751 { 1752 struct aml_value res; 1753 char name[8]; 1754 int i; 1755 1756 for (i = ACPI_STATE_S0; i <= ACPI_STATE_S5; i++) { 1757 snprintf(name, sizeof(name), "_S%d_", i); 1758 sc->sc_sleeptype[i].slp_typa = -1; 1759 sc->sc_sleeptype[i].slp_typb = -1; 1760 if (aml_evalname(sc, &aml_root, name, 0, NULL, &res) == 0) { 1761 if (res.type == AML_OBJTYPE_PACKAGE) { 1762 sc->sc_sleeptype[i].slp_typa = aml_val2int(res.v_package[0]); 1763 sc->sc_sleeptype[i].slp_typb = aml_val2int(res.v_package[1]); 1764 } 1765 aml_freevalue(&res); 1766 } 1767 } 1768 } 1769 1770 void 1771 acpi_init_pm(struct acpi_softc *sc) 1772 { 1773 sc->sc_tts = aml_searchname(&aml_root, "_TTS"); 1774 sc->sc_pts = aml_searchname(&aml_root, "_PTS"); 1775 sc->sc_wak = aml_searchname(&aml_root, "_WAK"); 1776 sc->sc_bfs = aml_searchname(&aml_root, "_BFS"); 1777 sc->sc_gts = aml_searchname(&aml_root, "_GTS"); 1778 } 1779 1780 #ifndef SMALL_KERNEL 1781 void 1782 acpi_sleep_walk(struct acpi_softc *sc, int state) 1783 { 1784 struct acpi_wakeq *wentry; 1785 int idx; 1786 1787 /* Clear GPE status */ 1788 for (idx = 0; idx < sc->sc_lastgpe; idx += 8) { 1789 acpi_write_pmreg(sc, ACPIREG_GPE_EN, idx>>3, 0); 1790 acpi_write_pmreg(sc, ACPIREG_GPE_STS, idx>>3, -1); 1791 } 1792 1793 SIMPLEQ_FOREACH(wentry, &sc->sc_wakedevs, q_next) { 1794 dnprintf(10, "%.4s(S%d) gpe %.2x\n", wentry->q_node->name, 1795 wentry->q_state, 1796 wentry->q_gpe); 1797 1798 if (state <= wentry->q_state) 1799 acpi_enable_onegpe(sc, wentry->q_gpe, 1); 1800 } 1801 } 1802 #endif /* ! 
SMALL_KERNEL */ 1803 1804 int 1805 acpi_sleep_state(struct acpi_softc *sc, int state) 1806 { 1807 int ret; 1808 1809 switch (state) { 1810 case ACPI_STATE_S0: 1811 return (0); 1812 case ACPI_STATE_S4: 1813 return (EOPNOTSUPP); 1814 case ACPI_STATE_S5: 1815 break; 1816 case ACPI_STATE_S1: 1817 case ACPI_STATE_S2: 1818 case ACPI_STATE_S3: 1819 if (sc->sc_sleeptype[state].slp_typa == -1 || 1820 sc->sc_sleeptype[state].slp_typb == -1) 1821 return (EOPNOTSUPP); 1822 } 1823 1824 acpi_sleep_walk(sc, state); 1825 1826 if ((ret = acpi_prepare_sleep_state(sc, state)) != 0) 1827 return (ret); 1828 1829 if (state != ACPI_STATE_S1) 1830 ret = acpi_sleep_machdep(sc, state); 1831 else 1832 ret = acpi_enter_sleep_state(sc, state); 1833 1834 #ifndef SMALL_KERNEL 1835 acpi_resume(sc); 1836 #endif /* ! SMALL_KERNEL */ 1837 return (ret); 1838 } 1839 1840 int 1841 acpi_enter_sleep_state(struct acpi_softc *sc, int state) 1842 { 1843 uint16_t rega, regb; 1844 int retries; 1845 1846 /* Clear WAK_STS bit */ 1847 acpi_write_pmreg(sc, ACPIREG_PM1_STS, 1, ACPI_PM1_WAK_STS); 1848 1849 /* Disable BM arbitration */ 1850 acpi_write_pmreg(sc, ACPIREG_PM2_CNT, 1, ACPI_PM2_ARB_DIS); 1851 1852 /* Write SLP_TYPx values */ 1853 rega = acpi_read_pmreg(sc, ACPIREG_PM1A_CNT, 0); 1854 regb = acpi_read_pmreg(sc, ACPIREG_PM1B_CNT, 0); 1855 rega &= ~(ACPI_PM1_SLP_TYPX_MASK | ACPI_PM1_SLP_EN); 1856 regb &= ~(ACPI_PM1_SLP_TYPX_MASK | ACPI_PM1_SLP_EN); 1857 rega |= ACPI_PM1_SLP_TYPX(sc->sc_sleeptype[state].slp_typa); 1858 regb |= ACPI_PM1_SLP_TYPX(sc->sc_sleeptype[state].slp_typb); 1859 acpi_write_pmreg(sc, ACPIREG_PM1A_CNT, 0, rega); 1860 acpi_write_pmreg(sc, ACPIREG_PM1B_CNT, 0, regb); 1861 1862 /* Set SLP_EN bit */ 1863 rega |= ACPI_PM1_SLP_EN; 1864 regb |= ACPI_PM1_SLP_EN; 1865 1866 /* 1867 * Let the machdep code flush caches and do any other necessary 1868 * tasks before going away. 1869 */ 1870 acpi_cpu_flush(sc, state); 1871 1872 acpi_write_pmreg(sc, ACPIREG_PM1A_CNT, 0, rega); 1873 acpi_write_pmreg(sc, ACPIREG_PM1B_CNT, 0, regb); 1874 /* Loop on WAK_STS */ 1875 for (retries = 1000; retries > 0; retries--) { 1876 rega = acpi_read_pmreg(sc, ACPIREG_PM1A_STS, 0); 1877 regb = acpi_read_pmreg(sc, ACPIREG_PM1B_STS, 0); 1878 if (rega & ACPI_PM1_WAK_STS || 1879 regb & ACPI_PM1_WAK_STS) 1880 break; 1881 DELAY(10); 1882 } 1883 1884 return (-1); 1885 } 1886 1887 #ifndef SMALL_KERNEL 1888 void 1889 acpi_resume(struct acpi_softc *sc) 1890 { 1891 struct aml_value env; 1892 1893 memset(&env, 0, sizeof(env)); 1894 env.type = AML_OBJTYPE_INTEGER; 1895 env.v_integer = sc->sc_state; 1896 1897 if (sc->sc_bfs) 1898 if (aml_evalnode(sc, sc->sc_bfs, 1, &env, NULL) != 0) { 1899 dnprintf(10, "%s evaluating method _BFS failed.\n", 1900 DEVNAME(sc)); 1901 } 1902 1903 dopowerhooks(PWR_RESUME); 1904 inittodr(0); 1905 1906 if (sc->sc_wak) 1907 if (aml_evalnode(sc, sc->sc_wak, 1, &env, NULL) != 0) { 1908 dnprintf(10, "%s evaluating method _WAK failed.\n", 1909 DEVNAME(sc)); 1910 } 1911 1912 sc->sc_state = ACPI_STATE_S0; 1913 if (sc->sc_tts) { 1914 env.v_integer = sc->sc_state; 1915 if (aml_evalnode(sc, sc->sc_tts, 1, &env, NULL) != 0) { 1916 dnprintf(10, "%s evaluating method _TTS failed.\n", 1917 DEVNAME(sc)); 1918 } 1919 } 1920 } 1921 #endif /* ! 
SMALL_KERNEL */ 1922 1923 int 1924 acpi_prepare_sleep_state(struct acpi_softc *sc, int state) 1925 { 1926 struct aml_value env; 1927 1928 if (sc == NULL || state == ACPI_STATE_S0) 1929 return(0); 1930 1931 if (sc->sc_sleeptype[state].slp_typa == -1 || 1932 sc->sc_sleeptype[state].slp_typb == -1) { 1933 printf("%s: state S%d unavailable\n", 1934 sc->sc_dev.dv_xname, state); 1935 return (ENXIO); 1936 } 1937 1938 memset(&env, 0, sizeof(env)); 1939 env.type = AML_OBJTYPE_INTEGER; 1940 env.v_integer = state; 1941 /* _TTS(state) */ 1942 if (sc->sc_tts) 1943 if (aml_evalnode(sc, sc->sc_tts, 1, &env, NULL) != 0) { 1944 dnprintf(10, "%s evaluating method _TTS failed.\n", 1945 DEVNAME(sc)); 1946 return (ENXIO); 1947 } 1948 1949 switch (state) { 1950 case ACPI_STATE_S1: 1951 case ACPI_STATE_S2: 1952 resettodr(); 1953 dopowerhooks(PWR_SUSPEND); 1954 break; 1955 case ACPI_STATE_S3: 1956 resettodr(); 1957 dopowerhooks(PWR_STANDBY); 1958 break; 1959 } 1960 1961 /* _PTS(state) */ 1962 if (sc->sc_pts) 1963 if (aml_evalnode(sc, sc->sc_pts, 1, &env, NULL) != 0) { 1964 dnprintf(10, "%s evaluating method _PTS failed.\n", 1965 DEVNAME(sc)); 1966 return (ENXIO); 1967 } 1968 1969 sc->sc_state = state; 1970 /* _GTS(state) */ 1971 if (sc->sc_gts) 1972 if (aml_evalnode(sc, sc->sc_gts, 1, &env, NULL) != 0) { 1973 dnprintf(10, "%s evaluating method _GTS failed.\n", 1974 DEVNAME(sc)); 1975 return (ENXIO); 1976 } 1977 1978 disable_intr(); 1979 aml_evalname(sc, &aml_root, "\\_SST", 1, &env, NULL); 1980 sc->sc_state = state; 1981 1982 return (0); 1983 } 1984 1985 1986 1987 void 1988 acpi_powerdown(void) 1989 { 1990 /* 1991 * In case acpi_prepare_sleep fails, we shouldn't try to enter 1992 * the sleep state. It might cost us the battery. 1993 */ 1994 acpi_sleep_walk(acpi_softc, ACPI_STATE_S5); 1995 if (acpi_prepare_sleep_state(acpi_softc, ACPI_STATE_S5) == 0) 1996 acpi_enter_sleep_state(acpi_softc, ACPI_STATE_S5); 1997 } 1998 1999 2000 extern int aml_busy; 2001 2002 void 2003 acpi_isr_thread(void *arg) 2004 { 2005 struct acpi_thread *thread = arg; 2006 struct acpi_softc *sc = thread->sc; 2007 u_int32_t gpe; 2008 2009 /* 2010 * If we have an interrupt handler, we can get notification 2011 * when certain status bits changes in the ACPI registers, 2012 * so let us enable some events we can forward to userland 2013 */ 2014 if (sc->sc_interrupt) { 2015 int16_t flag; 2016 2017 dnprintf(1,"slpbtn:%c pwrbtn:%c\n", 2018 sc->sc_fadt->flags & FADT_SLP_BUTTON ? 'n' : 'y', 2019 sc->sc_fadt->flags & FADT_PWR_BUTTON ? 'n' : 'y'); 2020 dnprintf(10, "Enabling acpi interrupts...\n"); 2021 sc->sc_wakeup = 1; 2022 2023 /* Enable Sleep/Power buttons if they exist */ 2024 flag = acpi_read_pmreg(sc, ACPIREG_PM1_EN, 0); 2025 if (!(sc->sc_fadt->flags & FADT_PWR_BUTTON)) { 2026 flag |= ACPI_PM1_PWRBTN_EN; 2027 } 2028 if (!(sc->sc_fadt->flags & FADT_SLP_BUTTON)) { 2029 flag |= ACPI_PM1_SLPBTN_EN; 2030 } 2031 acpi_write_pmreg(sc, ACPIREG_PM1_EN, 0, flag); 2032 2033 /* Enable handled GPEs here */ 2034 for (gpe = 0; gpe < sc->sc_lastgpe; gpe++) { 2035 if (sc->gpe_table[gpe].handler) 2036 acpi_enable_onegpe(sc, gpe, 1); 2037 } 2038 } 2039 2040 while (thread->running) { 2041 dnprintf(10, "sleep... 
%d\n", sc->sc_wakeup); 2042 while (sc->sc_wakeup) 2043 tsleep(sc, PWAIT, "acpi_idle", 0); 2044 sc->sc_wakeup = 1; 2045 dnprintf(10, "wakeup..\n"); 2046 if (aml_busy) 2047 continue; 2048 2049 for (gpe = 0; gpe < sc->sc_lastgpe; gpe++) { 2050 struct gpe_block *pgpe = &sc->gpe_table[gpe]; 2051 2052 if (pgpe->active) { 2053 pgpe->active = 0; 2054 dnprintf(50, "softgpe: %.2x\n", gpe); 2055 if (pgpe->handler) 2056 pgpe->handler(sc, gpe, pgpe->arg); 2057 } 2058 } 2059 if (sc->sc_powerbtn) { 2060 sc->sc_powerbtn = 0; 2061 2062 aml_notify_dev(ACPI_DEV_PBD, 0x80); 2063 2064 acpi_evindex++; 2065 dnprintf(1,"power button pressed\n"); 2066 KNOTE(sc->sc_note, ACPI_EVENT_COMPOSE(ACPI_EV_PWRBTN, 2067 acpi_evindex)); 2068 } 2069 if (sc->sc_sleepbtn) { 2070 sc->sc_sleepbtn = 0; 2071 2072 aml_notify_dev(ACPI_DEV_SBD, 0x80); 2073 2074 acpi_evindex++; 2075 dnprintf(1,"sleep button pressed\n"); 2076 KNOTE(sc->sc_note, ACPI_EVENT_COMPOSE(ACPI_EV_SLPBTN, 2077 acpi_evindex)); 2078 } 2079 2080 /* handle polling here to keep code non-concurrent*/ 2081 if (sc->sc_poll) { 2082 sc->sc_poll = 0; 2083 acpi_poll_notify(); 2084 } 2085 } 2086 free(thread, M_DEVBUF); 2087 2088 kthread_exit(0); 2089 } 2090 2091 void 2092 acpi_create_thread(void *arg) 2093 { 2094 struct acpi_softc *sc = arg; 2095 2096 if (kthread_create(acpi_isr_thread, sc->sc_thread, NULL, DEVNAME(sc)) 2097 != 0) { 2098 printf("%s: unable to create isr thread, GPEs disabled\n", 2099 DEVNAME(sc)); 2100 return; 2101 } 2102 } 2103 2104 int 2105 acpi_map_address(struct acpi_softc *sc, struct acpi_gas *gas, bus_addr_t base, 2106 bus_size_t size, bus_space_handle_t *pioh, bus_space_tag_t *piot) 2107 { 2108 int iospace = GAS_SYSTEM_IOSPACE; 2109 2110 /* No GAS structure, default to I/O space */ 2111 if (gas != NULL) { 2112 base += gas->address; 2113 iospace = gas->address_space_id; 2114 } 2115 switch (iospace) { 2116 case GAS_SYSTEM_MEMORY: 2117 *piot = sc->sc_memt; 2118 break; 2119 case GAS_SYSTEM_IOSPACE: 2120 *piot = sc->sc_iot; 2121 break; 2122 default: 2123 return -1; 2124 } 2125 if (bus_space_map(*piot, base, size, 0, pioh)) 2126 return -1; 2127 2128 return 0; 2129 } 2130 2131 int 2132 acpi_foundec(struct aml_node *node, void *arg) 2133 { 2134 struct acpi_softc *sc = (struct acpi_softc *)arg; 2135 struct device *self = (struct device *)arg; 2136 const char *dev; 2137 struct aml_value res; 2138 struct acpi_attach_args aaa; 2139 2140 if (aml_evalnode(sc, node, 0, NULL, &res) != 0) 2141 return 0; 2142 2143 switch (res.type) { 2144 case AML_OBJTYPE_STRING: 2145 dev = res.v_string; 2146 break; 2147 case AML_OBJTYPE_INTEGER: 2148 dev = aml_eisaid(aml_val2int(&res)); 2149 break; 2150 default: 2151 dev = "unknown"; 2152 break; 2153 } 2154 2155 if (strcmp(dev, ACPI_DEV_ECD)) 2156 return 0; 2157 2158 memset(&aaa, 0, sizeof(aaa)); 2159 aaa.aaa_iot = sc->sc_iot; 2160 aaa.aaa_memt = sc->sc_memt; 2161 aaa.aaa_node = node->parent; 2162 aaa.aaa_dev = dev; 2163 aaa.aaa_name = "acpiec"; 2164 config_found(self, &aaa, acpi_print); 2165 aml_freevalue(&res); 2166 2167 return 0; 2168 } 2169 2170 int 2171 acpi_matchhids(struct acpi_attach_args *aa, const char *hids[], 2172 const char *driver) 2173 { 2174 int i; 2175 2176 if (aa->aaa_dev == NULL || aa->aaa_node == NULL) 2177 return (0); 2178 for (i = 0; hids[i]; i++) { 2179 if (!strcmp(aa->aaa_dev, hids[i])) { 2180 dnprintf(5, "driver %s matches %s\n", driver, hids[i]); 2181 return (1); 2182 } 2183 } 2184 return (0); 2185 } 2186 2187 int 2188 acpi_foundhid(struct aml_node *node, void *arg) 2189 { 2190 struct acpi_softc *sc = (struct 
acpi_softc *)arg; 2191 struct device *self = (struct device *)arg; 2192 const char *dev; 2193 struct aml_value res; 2194 struct acpi_attach_args aaa; 2195 2196 dnprintf(10, "found hid device: %s ", node->parent->name); 2197 if (aml_evalnode(sc, node, 0, NULL, &res) != 0) 2198 return 0; 2199 2200 switch (res.type) { 2201 case AML_OBJTYPE_STRING: 2202 dev = res.v_string; 2203 break; 2204 case AML_OBJTYPE_INTEGER: 2205 dev = aml_eisaid(aml_val2int(&res)); 2206 break; 2207 default: 2208 dev = "unknown"; 2209 break; 2210 } 2211 dnprintf(10, " device: %s\n", dev); 2212 2213 memset(&aaa, 0, sizeof(aaa)); 2214 aaa.aaa_iot = sc->sc_iot; 2215 aaa.aaa_memt = sc->sc_memt; 2216 aaa.aaa_node = node->parent; 2217 aaa.aaa_dev = dev; 2218 2219 if (!strcmp(dev, ACPI_DEV_AC)) 2220 aaa.aaa_name = "acpiac"; 2221 else if (!strcmp(dev, ACPI_DEV_CMB)) 2222 aaa.aaa_name = "acpibat"; 2223 else if (!strcmp(dev, ACPI_DEV_LD) || 2224 !strcmp(dev, ACPI_DEV_PBD) || 2225 !strcmp(dev, ACPI_DEV_SBD)) 2226 aaa.aaa_name = "acpibtn"; 2227 else if (!strcmp(dev, ACPI_DEV_ASUS)) 2228 aaa.aaa_name = "acpiasus"; 2229 else if (!strcmp(dev, ACPI_DEV_THINKPAD)) { 2230 aaa.aaa_name = "acpithinkpad"; 2231 acpi_thinkpad_enabled = 1; 2232 } else if (!strcmp(dev, ACPI_DEV_ASUSAIBOOSTER)) 2233 aaa.aaa_name = "aibs"; 2234 2235 if (aaa.aaa_name) 2236 config_found(self, &aaa, acpi_print); 2237 2238 aml_freevalue(&res); 2239 2240 return 0; 2241 } 2242 2243 int 2244 acpi_founddock(struct aml_node *node, void *arg) 2245 { 2246 struct acpi_softc *sc = (struct acpi_softc *)arg; 2247 struct device *self = (struct device *)arg; 2248 struct acpi_attach_args aaa; 2249 2250 dnprintf(10, "found dock entry: %s\n", node->parent->name); 2251 2252 memset(&aaa, 0, sizeof(aaa)); 2253 aaa.aaa_iot = sc->sc_iot; 2254 aaa.aaa_memt = sc->sc_memt; 2255 aaa.aaa_node = node->parent; 2256 aaa.aaa_name = "acpidock"; 2257 2258 config_found(self, &aaa, acpi_print); 2259 2260 return 0; 2261 } 2262 2263 int 2264 acpi_foundvideo(struct aml_node *node, void *arg) 2265 { 2266 struct acpi_softc *sc = (struct acpi_softc *)arg; 2267 struct device *self = (struct device *)arg; 2268 struct acpi_attach_args aaa; 2269 2270 memset(&aaa, 0, sizeof(aaa)); 2271 aaa.aaa_iot = sc->sc_iot; 2272 aaa.aaa_memt = sc->sc_memt; 2273 aaa.aaa_node = node->parent; 2274 aaa.aaa_name = "acpivideo"; 2275 2276 config_found(self, &aaa, acpi_print); 2277 2278 return (0); 2279 } 2280 2281 TAILQ_HEAD(acpi_dv_hn, acpi_dev_rank) acpi_dv_h; 2282 void 2283 acpi_dev_sort(void) 2284 { 2285 struct device *dev, *idev; 2286 struct acpi_dev_rank *rentry, *ientry; 2287 int rank; 2288 2289 TAILQ_INIT(&acpi_dv_h); 2290 2291 TAILQ_FOREACH(dev, &alldevs, dv_list) { 2292 for (rank = -1, idev = dev; idev != NULL; 2293 idev = idev->dv_parent, rank++) 2294 ; /* nothing */ 2295 2296 rentry = malloc(sizeof(*rentry), M_DEVBUF, M_WAITOK | M_ZERO); 2297 rentry->rank = rank; 2298 rentry->dev = dev; 2299 2300 if (TAILQ_FIRST(&acpi_dv_h) == NULL) 2301 TAILQ_INSERT_HEAD(&acpi_dv_h, rentry, link); 2302 TAILQ_FOREACH_REVERSE(ientry, &acpi_dv_h, acpi_dv_hn, link) { 2303 if (rentry->rank > ientry->rank) { 2304 TAILQ_INSERT_AFTER(&acpi_dv_h, ientry, rentry, 2305 link); 2306 break; 2307 } 2308 } 2309 } 2310 } 2311 2312 void 2313 acpi_dev_free(void) 2314 { 2315 struct acpi_dev_rank *dvr; 2316 2317 while ((dvr = TAILQ_FIRST(&acpi_dv_h)) != NULL) { 2318 TAILQ_REMOVE(&acpi_dv_h, dvr, link); 2319 if (dvr != NULL) { 2320 free(dvr, M_DEVBUF); 2321 dvr = NULL; 2322 } 2323 } 2324 } 2325 #endif /* SMALL_KERNEL */ 2326
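
#if 0
/*
 * Illustrative sketch only (not compiled, not part of this driver): how a
 * child driver's match routine might use acpi_matchhids() above.  The
 * "acpifoo" driver name and the HID strings are hypothetical examples.
 */
static const char *acpifoo_hids[] = { "ABC0001", "ABC0002", NULL };

int
acpifoo_match(struct device *parent, void *match, void *aux)
{
	struct acpi_attach_args *aa = aux;
	struct cfdata *cf = match;

	/* Only match nodes whose _HID is one of ours. */
	if (acpi_matchhids(aa, acpifoo_hids, cf->cf_driver->cd_name))
		return (1);
	return (0);
}
#endif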