/*	$OpenBSD: apm.c,v 1.133 2024/10/07 01:31:22 jsg Exp $	*/

/*-
 * Copyright (c) 1998-2001 Michael Shalayeff. All rights reserved.
 * Copyright (c) 1995 John T. Kohl. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the authors nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "apm.h"

#if NAPM > 1
#error only one APM device may be configured
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/clockintr.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/event.h>

#include <machine/conf.h>
#include <machine/cpufunc.h>
#include <machine/gdt.h>

#include <dev/isa/isareg.h>
#include <dev/wscons/wsdisplayvar.h>

#include <machine/acpiapm.h>
#include <machine/biosvar.h>
#include <machine/apmvar.h>

#include "wsdisplay.h"

#if defined(APMDEBUG)
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)	/**/
#endif

struct cfdriver apm_cd = {
	NULL, "apm", DV_DULL
};

struct apm_softc {
	struct device sc_dev;
	struct klist sc_note;
	int	sc_flags;
	int	batt_life;
	int	be_batt;
	struct proc *sc_thread;
	struct rwlock sc_lock;
};
#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

int	apmprobe(struct device *, void *, void *);
void	apmattach(struct device *, struct device *, void *);

const struct cfattach apm_ca = {
	sizeof(struct apm_softc), apmprobe, apmattach
};

void	filt_apmrdetach(struct knote *kn);
int	filt_apmread(struct knote *kn, long hint);

const struct filterops apmread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_apmrdetach,
	.f_event	= filt_apmread,
};

#define	APM_RESUME_HOLDOFF	3

/*
 * Flags to control kernel display
 *	SCFLAG_NOPRINT:		do not output APM power messages due to
 *				a power change event.
 *
 *	SCFLAG_PCTPRINT:	do not output APM power messages due to
 *				a power change event unless the battery
 *				percentage changes.
 */
#define SCFLAG_NOPRINT	0x0008000
#define SCFLAG_PCTPRINT	0x0004000
#define SCFLAG_PRINT	(SCFLAG_NOPRINT|SCFLAG_PCTPRINT)

#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APMDEV(dev)	(minor(dev)&0x0f)
#define APMDEV_NORMAL	0
#define APMDEV_CTL	8

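/*
 * Pending power-state requests and related event bookkeeping;
 * apm_periodic_check() acts on these and resets the request counters
 * on every pass.
 */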
int apm_standbys;
int apm_lidclose;
int apm_userstandbys;
int apm_suspends;
int apm_resumes;
int apm_battlow;
int apm_evindex;
int apm_error;
int apm_op_inprog;

u_int apm_flags;
u_char apm_majver;
u_char apm_minver;
int apm_attached = 0;
static int apm_slow_called = 0;

struct {
	u_int32_t entry;
	u_int16_t seg;
	u_int16_t pad;
} apm_ep;

struct apmregs {
	u_int32_t ax;
	u_int32_t bx;
	u_int32_t cx;
	u_int32_t dx;
};

int	apmcall(u_int, u_int, struct apmregs *);
int	apm_handle_event(struct apm_softc *, struct apmregs *);
void	apm_set_ver(struct apm_softc *);
int	apm_periodic_check(struct apm_softc *);
void	apm_thread_create(void *v);
void	apm_thread(void *);
void	apm_disconnect(struct apm_softc *);
void	apm_perror(const char *, struct apmregs *);
void	apm_powmgt_enable(int onoff);
void	apm_powmgt_engage(int onoff, u_int devid);
/* void apm_devpowmgt_enable(int onoff, u_int devid); */
int	apm_record_event(struct apm_softc *sc, u_int type);
const char *apm_err_translate(int code);

#define	apm_get_powstat(r) apmcall(APM_POWER_STATUS, APM_DEV_ALLDEVS, r)
void	apm_suspend(int);
void	apm_resume(struct apm_softc *, struct apmregs *);
void	apm_cpu_slow(void);

static int __inline
apm_get_event(struct apmregs *r)
{
	int rv;

	bzero(r, sizeof(*r));
	rv = apmcall(APM_GET_PM_EVENT, 0, r);
	return rv;
}

const char *
apm_err_translate(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return "power management disabled";
	case APM_ERR_REALALREADY:
		return "real mode interface already connected";
	case APM_ERR_NOTCONN:
		return "interface not connected";
	case APM_ERR_16ALREADY:
		return "16-bit interface already connected";
	case APM_ERR_16NOTSUPP:
		return "16-bit interface not supported";
	case APM_ERR_32ALREADY:
		return "32-bit interface already connected";
	case APM_ERR_32NOTSUPP:
		return "32-bit interface not supported";
	case APM_ERR_UNRECOG_DEV:
		return "unrecognized device ID";
	case APM_ERR_ERANGE:
		return "parameter out of range";
	case APM_ERR_NOTENGAGED:
		return "interface not engaged";
	case APM_ERR_UNABLE:
		return "unable to enter requested state";
	case APM_ERR_NOEVENTS:
		return "No pending events";
	case APM_ERR_NOT_PRESENT:
		return "No APM present";
	default:
		return "unknown error code?";
	}
}

int apmerrors = 0;

void
apm_perror(const char *str, struct apmregs *regs)
{
	printf("apm0: APM %s: %s (%d)\n", str,
	    apm_err_translate(APM_ERR_CODE(regs)),
	    APM_ERR_CODE(regs));
	delay(1000000);

	apmerrors++;
}

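/*
 * Full suspend/standby path: quiesce devices and the buffer cache,
 * suspend and power down all devices, then ask the APM BIOS to enter
 * the requested state.  Execution resumes here on wakeup, where the
 * clocks, devices, random pool and display are brought back up and
 * hw.setperf is restored.
 */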
void
apm_suspend(int state)
{
	extern int perflevel;
	int s;

#if NWSDISPLAY > 0
	wsdisplay_suspend();
#endif /* NWSDISPLAY > 0 */
	stop_periodic_resettodr();
	config_suspend_all(DVACT_QUIESCE);
	bufq_quiesce();

	s = splhigh();
	intr_disable();
	cold = 2;
	config_suspend_all(DVACT_SUSPEND);
	suspend_randomness();

	/* XXX
	 * Flag to disk drivers that they should "power down" the disk
	 * when we get to DVACT_POWERDOWN.
	 */
	boothowto |= RB_POWERDOWN;
	config_suspend_all(DVACT_POWERDOWN);
	boothowto &= ~RB_POWERDOWN;

	/* Send machine to sleep */
	apm_set_powstate(APM_DEV_ALLDEVS, state);
	/* Wake up */

	/* They say that some machines may require reinitializing the clocks */
	i8254_startclock();
	if (initclock_func == i8254_initclocks)
		rtcstart();		/* in i8254 mode, rtc is profclock */
	inittodr(gettime());

	clockintr_cpu_init(NULL);
	clockintr_trigger();

	config_suspend_all(DVACT_RESUME);
	cold = 0;
	intr_enable();
	splx(s);

	resume_randomness(NULL, 0);	/* force RNG upper level reseed */
	bufq_restart();

	config_suspend_all(DVACT_WAKEUP);
	start_periodic_resettodr();

#if NWSDISPLAY > 0
	wsdisplay_resume();
#endif /* NWSDISPLAY > 0 */

	/* restore hw.setperf */
	if (cpu_setperf != NULL)
		cpu_setperf(perflevel);
}

void
apm_resume(struct apm_softc *sc, struct apmregs *regs)
{

	apm_resumes = APM_RESUME_HOLDOFF;

	/* lower bit in cx means pccard was powered down */

	apm_record_event(sc, regs->bx);
}

int
apm_record_event(struct apm_softc *sc, u_int type)
{
	if (!apm_error && (sc->sc_flags & SCFLAG_OPEN) == 0) {
		DPRINTF(("apm_record_event: no user waiting\n"));
		apm_error++;
		return 1;
	}

	apm_evindex++;
	knote_locked(&sc->sc_note, APM_EVENT_COMPOSE(type, apm_evindex));
	return (0);
}

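/*
 * Dispatch a single APM BIOS event.  Suspend and standby requests are
 * handed to userland via apm_record_event(); if nobody has /dev/apm
 * open, the request is flagged for the kernel to carry out itself.
 * A non-zero return terminates the current event polling loop.
 */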
int
apm_handle_event(struct apm_softc *sc, struct apmregs *regs)
{
	struct apmregs nregs;
	int ret = 0;

	switch (regs->bx) {
	case APM_NOEVENT:
		ret++;
		break;

	case APM_USER_STANDBY_REQ:
		if (apm_resumes || apm_op_inprog)
			break;
		DPRINTF(("user wants STANDBY--fat chance\n"));
		apm_op_inprog++;
		if (apm_record_event(sc, regs->bx)) {
			DPRINTF(("standby ourselves\n"));
			apm_userstandbys++;
		}
		break;
	case APM_STANDBY_REQ:
		if (apm_resumes || apm_op_inprog)
			break;
		DPRINTF(("standby requested\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(("premature standby\n"));
			apm_error++;
			ret++;
		}
		apm_op_inprog++;
		if (apm_record_event(sc, regs->bx)) {
			DPRINTF(("standby ourselves\n"));
			apm_standbys++;
		}
		break;
	case APM_USER_SUSPEND_REQ:
		if (apm_resumes || apm_op_inprog)
			break;
		DPRINTF(("user wants suspend--fat chance!\n"));
		apm_op_inprog++;
		if (apm_record_event(sc, regs->bx)) {
			DPRINTF(("suspend ourselves\n"));
			apm_suspends++;
		}
		break;
	case APM_SUSPEND_REQ:
		if (apm_resumes || apm_op_inprog)
			break;
		DPRINTF(("suspend requested\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(("premature suspend\n"));
			apm_error++;
			ret++;
		}
		apm_op_inprog++;
		if (apm_record_event(sc, regs->bx)) {
			DPRINTF(("suspend ourselves\n"));
			apm_suspends++;
		}
		break;
	case APM_POWER_CHANGE:
		DPRINTF(("power status change\n"));
		apm_get_powstat(&nregs);
		apm_record_event(sc, regs->bx);
		break;
	case APM_NORMAL_RESUME:
		DPRINTF(("system resumed\n"));
		apm_resume(sc, regs);
		break;
	case APM_CRIT_RESUME:
		DPRINTF(("system resumed without us!\n"));
		apm_resume(sc, regs);
		break;
	case APM_SYS_STANDBY_RESUME:
		DPRINTF(("system standby resume\n"));
		apm_resume(sc, regs);
		break;
	case APM_UPDATE_TIME:
		DPRINTF(("update time, please\n"));
		apm_record_event(sc, regs->bx);
		break;
	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(("suspend required immediately\n"));
		apm_record_event(sc, regs->bx);
		apm_suspend(APM_SYS_SUSPEND);
		break;
	case APM_BATTERY_LOW:
		DPRINTF(("Battery low!\n"));
		apm_battlow++;
		apm_record_event(sc, regs->bx);
		break;
	case APM_CAPABILITY_CHANGE:
		DPRINTF(("capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(("adult event\n"));
		} else {
			if (apmcall(APM_GET_CAPABILITIES, APM_DEV_APM_BIOS,
			    &nregs) != 0) {
				apm_perror("get capabilities", &nregs);
			} else {
				apm_get_powstat(&nregs);
			}
		}
		break;
	default: {
#ifdef APMDEBUG
		char *p;
		switch (regs->bx >> 8) {
		case 0:	p = "reserved system";	break;
		case 1:	p = "reserved device";	break;
		case 2:	p = "OEM defined";	break;
		default:p = "reserved";		break;
		}
#endif
		DPRINTF(("apm_handle_event: %s event, code %d\n", p, regs->bx));
	}
	}
	return ret;
}

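/*
 * Called from apm_thread() once a second (and from apmattach() at boot):
 * drain the BIOS event queue, then act on any suspend or standby request
 * that was recorded.  Returns -1 if the interface looks broken and should
 * be disconnected.
 */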
int
apm_periodic_check(struct apm_softc *sc)
{
	struct apmregs regs;
	int ret = 0;

	if (apm_op_inprog)
		apm_set_powstate(APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);

	while (1) {
		if (apm_get_event(&regs) != 0) {
			/* I think some BIOSes combine the error codes */
			if (!(APM_ERR_CODE(&regs) & APM_ERR_NOEVENTS))
				apm_perror("get event", &regs);
			break;
		}

		/* If the APM BIOS tells us to suspend, don't do it twice */
		if (regs.bx == APM_SUSPEND_REQ)
			apm_lidclose = 0;
		if (apm_handle_event(sc, &regs))
			break;
	}

	if (apm_error || APM_ERR_CODE(&regs) == APM_ERR_NOTCONN)
		ret = -1;

	if (apm_lidclose) {
		apm_lidclose = 0;
		/* Fake a suspend request */
		regs.bx = APM_SUSPEND_REQ;
		apm_handle_event(sc, &regs);
	}
	if (apm_suspends /*|| (apm_battlow && apm_userstandbys)*/) {
		apm_op_inprog = 0;
		apm_suspend(APM_SYS_SUSPEND);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_suspend(APM_SYS_STANDBY);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_error = 0;

	if (apm_resumes)
		apm_resumes--;
	return (ret);
}

void
apm_powmgt_enable(int onoff)
{
	struct apmregs regs;

	bzero(&regs, sizeof(regs));
	regs.cx = onoff ? APM_MGT_ENABLE : APM_MGT_DISABLE;
	if (apmcall(APM_PWR_MGT_ENABLE,
	    (apm_minver? APM_DEV_APM_BIOS : APM_MGT_ALL), &regs) != 0)
		apm_perror("power management enable", &regs);
}

void
apm_powmgt_engage(int onoff, u_int dev)
{
	struct apmregs regs;

	if (apm_minver == 0)
		return;
	bzero(&regs, sizeof(regs));
	regs.cx = onoff ? APM_MGT_ENGAGE : APM_MGT_DISENGAGE;
	if (apmcall(APM_PWR_MGT_ENGAGE, dev, &regs) != 0)
		printf("apm0: APM engage (device %x): %s (%d)\n",
		    dev, apm_err_translate(APM_ERR_CODE(&regs)),
		    APM_ERR_CODE(&regs));
}

#ifdef notused
void
apm_devpowmgt_enable(int onoff, u_int dev)
{
	struct apmregs regs;

	if (apm_minver == 0)
		return;
	/* enable is auto BIOS management.
	 * disable is program control.
	 */
	bzero(&regs, sizeof(regs));
	regs.cx = onoff ? APM_MGT_ENABLE : APM_MGT_DISABLE;
	if (apmcall(APM_DEVICE_MGMT_ENABLE, dev, &regs) != 0)
		printf("APM device engage (device %x): %s (%d)\n",
		    dev, apm_err_translate(APM_ERR_CODE(&regs)),
		    APM_ERR_CODE(&regs));
}
#endif

int
apm_set_powstate(u_int dev, u_int state)
{
	struct apmregs regs;

	if (!apm_cd.cd_ndevs || (apm_minver == 0 && state > APM_SYS_OFF))
		return EINVAL;
	bzero(&regs, sizeof(regs));
	regs.cx = state;
	if (apmcall(APM_SET_PWR_STATE, dev, &regs) != 0) {
		apm_perror("set power state", &regs);
		if (APM_ERR_CODE(&regs) == APM_ERR_UNRECOG_DEV)
			return ENXIO;
		else
			return EIO;
	}
	return 0;
}

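/*
 * CPU idle hooks.  apmattach() wires these into cpu_idle_enter_fcn,
 * cpu_idle_cycle_fcn and cpu_idle_leave_fcn (depending on whether the
 * BIOS reports APM_IDLE_SLOWS) so the APM BIOS can throttle or halt
 * the CPU while the kernel is idle.
 */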
void
apm_cpu_slow(void)
{
	struct apmregs regs;
	static u_int64_t call_apm_slow = 0;

	if (call_apm_slow != curcpu()->ci_schedstate.spc_cp_time[CP_IDLE]) {
		/* Always call BIOS halt/idle stuff */
		bzero(&regs, sizeof(regs));
		if (apmcall(APM_CPU_IDLE, 0, &regs) != 0) {
#ifdef DIAGNOSTIC
			apm_perror("set CPU slow", &regs);
#endif
		}
		apm_slow_called = 1;
		call_apm_slow = curcpu()->ci_schedstate.spc_cp_time[CP_IDLE];
	}
}

void
apm_cpu_busy(void)
{
	struct apmregs regs;

	if (!apm_slow_called)
		return;

	if (apm_flags & APM_IDLE_SLOWS) {
		bzero(&regs, sizeof(regs));
		if (apmcall(APM_CPU_BUSY, 0, &regs) != 0) {
#ifdef DIAGNOSTIC
			apm_perror("set CPU busy", &regs);
#endif
		}
		apm_slow_called = 0;
	}
}

void
apm_cpu_idle(void)
{
	struct apmregs regs;
	static u_int64_t call_apm_idle = 0;

	/*
	 * We call the bios APM_IDLE routine here only when we
	 * have been idle for some time - otherwise we just hlt.
	 */

	if (call_apm_idle != curcpu()->ci_schedstate.spc_cp_time[CP_IDLE]) {
		/* Always call BIOS halt/idle stuff */
		bzero(&regs, sizeof(regs));
		if (apmcall(APM_CPU_IDLE, 0, &regs) != 0) {
#ifdef DIAGNOSTIC
			apm_perror("set CPU idle", &regs);
#endif
		}

		/* If BIOS did not halt, halt now! */
		if (apm_flags & APM_IDLE_SLOWS) {
			__asm volatile("sti;hlt");
		}
		call_apm_idle = curcpu()->ci_schedstate.spc_cp_time[CP_IDLE];
	} else {
		__asm volatile("sti;hlt");
	}
}

void
apm_set_ver(struct apm_softc *self)
{
	struct apmregs regs;
	int rv = 0;

	bzero(&regs, sizeof(regs));
	regs.cx = APM_VERSION;

	if (APM_MAJOR(apm_flags) == 1 && APM_MINOR(apm_flags) == 2 &&
	    (rv = apmcall(APM_DRIVER_VERSION, APM_DEV_APM_BIOS, &regs)) == 0) {
		apm_majver = APM_CONN_MAJOR(&regs);
		apm_minver = APM_CONN_MINOR(&regs);
	} else {
#ifdef APMDEBUG
		if (rv)
			apm_perror("set version 1.2", &regs);
#endif
		/* try downgrading to 1.1 */
		bzero(&regs, sizeof(regs));
		regs.cx = 0x0101;

		if (apmcall(APM_DRIVER_VERSION, APM_DEV_APM_BIOS, &regs) == 0) {
			apm_majver = 1;
			apm_minver = 1;
		} else {
#ifdef APMDEBUG
			apm_perror("set version 1.1", &regs);
#endif
			/* stay w/ flags then */
			apm_majver = APM_MAJOR(apm_flags);
			apm_minver = APM_MINOR(apm_flags);

			/* fix version for some endianness-challenged Compaqs */
			if (!apm_majver) {
				apm_majver = 1;
				apm_minver = 0;
			}
		}
	}
	printf(": Power Management spec V%d.%d", apm_majver, apm_minver);
#ifdef DIAGNOSTIC
	if (apm_flags & APM_IDLE_SLOWS)
		printf(" (slowidle)");
	if (apm_flags & APM_BIOS_PM_DISABLED)
		printf(" (BIOS management disabled)");
	if (apm_flags & APM_BIOS_PM_DISENGAGED)
		printf(" (BIOS managing devices)");
#endif
	printf("\n");
}

void
apm_disconnect(struct apm_softc *sc)
{
	struct apmregs regs;

	bzero(&regs, sizeof(regs));
	if (apmcall(APM_SYSTEM_DEFAULTS,
	    (apm_minver == 1 ? APM_DEV_ALLDEVS : APM_DEFAULTS_ALL), &regs))
		apm_perror("system defaults failed", &regs);

	if (apmcall(APM_DISCONNECT, APM_DEV_APM_BIOS, &regs))
		apm_perror("disconnect failed", &regs);
	else
		printf("%s: disconnected\n", sc->sc_dev.dv_xname);
	apm_flags |= APM_BIOS_PM_DISABLED;
}

int
apmprobe(struct device *parent, void *match, void *aux)
{
	struct bios_attach_args *ba = aux;
	bios_apminfo_t *ap = ba->ba_apmp;
	bus_space_handle_t ch, dh;

	if (apm_cd.cd_ndevs || strcmp(ba->ba_name, "apm") ||
	    !(ap->apm_detail & APM_32BIT_SUPPORTED))
		return 0;

	/* addresses check
	   since pc* console and vga* probes much later
	   we cannot check for video memory being mapped
	   for apm stuff w/ bus_space_map() */
	if (ap->apm_code_len == 0 ||
	    (ap->apm_code32_base < IOM_BEGIN &&
	     ap->apm_code32_base + ap->apm_code_len > IOM_BEGIN) ||
	    (ap->apm_code16_base < IOM_BEGIN &&
	     ap->apm_code16_base + ap->apm_code16_len > IOM_BEGIN) ||
	    (ap->apm_data_base < IOM_BEGIN &&
	     ap->apm_data_base + ap->apm_data_len > IOM_BEGIN))
		return 0;

	if (bus_space_map(ba->ba_memt, ap->apm_code32_base,
	    ap->apm_code_len, 1, &ch) != 0) {
		DPRINTF(("apm0: can't map code\n"));
		return 0;
	}
	bus_space_unmap(ba->ba_memt, ch, ap->apm_code_len);

	if (bus_space_map(ba->ba_memt, ap->apm_data_base,
	    ap->apm_data_len, 1, &dh) != 0) {
		DPRINTF(("apm0: can't map data\n"));
		return 0;
	}
	bus_space_unmap(ba->ba_memt, dh, ap->apm_data_len);
	return 1;
}

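/*
 * Attach: carve GDT segments for the 32-bit BIOS interface, negotiate
 * the APM version, enable/engage power management, hook the CPU idle
 * loop and, if an initial event check succeeds, start the polling
 * thread and take over the /dev/apm entry points.
 */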
void
apmattach(struct device *parent, struct device *self, void *aux)
{
	struct bios_attach_args *ba = aux;
	bios_apminfo_t *ap = ba->ba_apmp;
	struct apm_softc *sc = (void *)self;
	struct apmregs regs;
	u_int cbase, clen, l;
	bus_space_handle_t ch16, ch32, dh;

	apm_flags = ap->apm_detail;
	/*
	 * set up GDT descriptors for APM
	 */
	if (apm_flags & APM_32BIT_SUPPORTED) {

		/* truncate segments' limits to a page */
		ap->apm_code_len -= (ap->apm_code32_base +
		    ap->apm_code_len + 1) & 0xfff;
		ap->apm_code16_len -= (ap->apm_code16_base +
		    ap->apm_code16_len + 1) & 0xfff;
		ap->apm_data_len -= (ap->apm_data_base +
		    ap->apm_data_len + 1) & 0xfff;

		/* adjust version */
		if ((sc->sc_dev.dv_cfdata->cf_flags & APM_VERMASK) &&
		    (apm_flags & APM_VERMASK) !=
		    (sc->sc_dev.dv_cfdata->cf_flags & APM_VERMASK))
			apm_flags = (apm_flags & ~APM_VERMASK) |
			    (sc->sc_dev.dv_cfdata->cf_flags & APM_VERMASK);
		if (sc->sc_dev.dv_cfdata->cf_flags & APM_NOCLI) {
			extern int apm_cli;	/* from apmcall.S */
			apm_cli = 0;
		}
		if (sc->sc_dev.dv_cfdata->cf_flags & APM_BEBATT)
			sc->be_batt = 1;
		apm_ep.seg = GSEL(GAPM32CODE_SEL,SEL_KPL);
		apm_ep.entry = ap->apm_entry;
		cbase = min(ap->apm_code32_base, ap->apm_code16_base);
		clen = max(ap->apm_code32_base + ap->apm_code_len,
		    ap->apm_code16_base + ap->apm_code16_len) - cbase;
		if ((cbase <= ap->apm_data_base &&
		     cbase + clen >= ap->apm_data_base) ||
		    (ap->apm_data_base <= cbase &&
		     ap->apm_data_base + ap->apm_data_len >= cbase)) {
			l = max(ap->apm_data_base + ap->apm_data_len + 1,
			    cbase + clen + 1) -
			    min(ap->apm_data_base, cbase);
			bus_space_map(ba->ba_memt,
			    min(ap->apm_data_base, cbase),
			    l, 1, &dh);
			ch16 = dh;
			if (ap->apm_data_base < cbase)
				ch16 += cbase - ap->apm_data_base;
			else
				dh += ap->apm_data_base - cbase;
		} else {

			bus_space_map(ba->ba_memt, cbase, clen + 1, 1, &ch16);
			bus_space_map(ba->ba_memt, ap->apm_data_base,
			    ap->apm_data_len + 1, 1, &dh);
		}
		ch32 = ch16;
		if (ap->apm_code16_base == cbase)
			ch32 += ap->apm_code32_base - cbase;
		else
			ch16 += ap->apm_code16_base - cbase;

		setgdt(GAPM32CODE_SEL, (void *)ch32, ap->apm_code_len,
		    SDT_MEMERA, SEL_KPL, 1, 0);
		setgdt(GAPM16CODE_SEL, (void *)ch16, ap->apm_code16_len,
		    SDT_MEMERA, SEL_KPL, 0, 0);
		setgdt(GAPMDATA_SEL, (void *)dh, ap->apm_data_len, SDT_MEMRWA,
		    SEL_KPL, 1, 0);
		DPRINTF((": flags %x code 32:%x/%lx[%x] 16:%x/%lx[%x] "
		    "data %x/%lx/%x ep %x (%x:%lx)\n%s", apm_flags,
		    ap->apm_code32_base, ch32, ap->apm_code_len,
		    ap->apm_code16_base, ch16, ap->apm_code16_len,
		    ap->apm_data_base, dh, ap->apm_data_len,
		    ap->apm_entry, apm_ep.seg, ap->apm_entry+ch32,
		    sc->sc_dev.dv_xname));

		apm_set_ver(sc);

		if (apm_flags & APM_BIOS_PM_DISABLED)
			apm_powmgt_enable(1);

		/* Engage cooperative power management on all devices (v1.1) */
		apm_powmgt_engage(1, APM_DEV_ALLDEVS);

		bzero(&regs, sizeof(regs));
		if (apm_get_powstat(&regs) != 0)
			apm_perror("get power status", &regs);
		apm_cpu_busy();

		rw_init(&sc->sc_lock, "apmlk");

		/*
		 * Do a check once, ignoring any errors. This avoids
		 * gratuitous APM disconnects on laptops where the first
		 * event in the queue (after a boot) is non-recognizable.
		 * The IBM ThinkPad 770Z is one of those.
		 */
		apm_periodic_check(sc);

		if (apm_periodic_check(sc) == -1) {
			apm_disconnect(sc);

			/* Failed, nuke APM idle loop */
			cpu_idle_enter_fcn = NULL;
			cpu_idle_cycle_fcn = NULL;
			cpu_idle_leave_fcn = NULL;
		} else {
			kthread_create_deferred(apm_thread_create, sc);

			/* Setup APM idle loop */
			if (apm_flags & APM_IDLE_SLOWS) {
				cpu_idle_enter_fcn = apm_cpu_slow;
				cpu_idle_cycle_fcn = NULL;
				cpu_idle_leave_fcn = apm_cpu_busy;
			} else {
				cpu_idle_enter_fcn = NULL;
				cpu_idle_cycle_fcn = apm_cpu_idle;
				cpu_idle_leave_fcn = NULL;
			}

			/* All is well, let the rest of the world know */
			acpiapm_open = apmopen;
			acpiapm_close = apmclose;
			acpiapm_ioctl = apmioctl;
			acpiapm_kqfilter = apmkqfilter;
			apm_attached = 1;
		}
	} else {
		setgdt(GAPM32CODE_SEL, NULL, 0, 0, 0, 0, 0);
		setgdt(GAPM16CODE_SEL, NULL, 0, 0, 0, 0, 0);
		setgdt(GAPMDATA_SEL, NULL, 0, 0, 0, 0, 0);
	}
}

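/*
 * Deferred from apmattach(): on MP kernels, or if creating the polling
 * thread fails, APM is disconnected and the idle hooks are removed.
 * Otherwise apm_thread() polls the BIOS for events once per second.
 */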
void
apm_thread_create(void *v)
{
	struct apm_softc *sc = v;

#ifdef MULTIPROCESSOR
	if (ncpus > 1) {
		apm_disconnect(sc);

		/* Nuke APM idle loop */
		cpu_idle_enter_fcn = NULL;
		cpu_idle_cycle_fcn = NULL;
		cpu_idle_leave_fcn = NULL;

		return;
	}
#endif

	if (kthread_create(apm_thread, sc, &sc->sc_thread,
	    sc->sc_dev.dv_xname)) {
		apm_disconnect(sc);
		printf("%s: failed to create kernel thread, disabled",
		    sc->sc_dev.dv_xname);

		/* Nuke APM idle loop */
		cpu_idle_enter_fcn = NULL;
		cpu_idle_cycle_fcn = NULL;
		cpu_idle_leave_fcn = NULL;
	}
}

void
apm_thread(void *v)
{
	struct apm_softc *sc = v;

	for (;;) {
		rw_enter_write(&sc->sc_lock);
		(void) apm_periodic_check(sc);
		rw_exit_write(&sc->sc_lock);
		tsleep_nsec(&nowake, PWAIT, "apmev", SEC_TO_NSEC(1));
	}
}

int
apmopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct apm_softc *sc;
	int error = 0;

	/* apm0 only */
	if (!apm_cd.cd_ndevs || APMUNIT(dev) != 0 ||
	    !(sc = apm_cd.cd_devs[APMUNIT(dev)]))
		return ENXIO;

	if (apm_flags & APM_BIOS_PM_DISABLED)
		return ENXIO;

	DPRINTF(("apmopen: dev %d pid %d flag %x mode %x\n",
	    APMDEV(dev), p->p_p->ps_pid, flag, mode));

	rw_enter_write(&sc->sc_lock);
	switch (APMDEV(dev)) {
	case APMDEV_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APMDEV_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	rw_exit_write(&sc->sc_lock);
	return error;
}

int
apmclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct apm_softc *sc;

	/* apm0 only */
	if (!apm_cd.cd_ndevs || APMUNIT(dev) != 0 ||
	    !(sc = apm_cd.cd_devs[APMUNIT(dev)]))
		return ENXIO;

	DPRINTF(("apmclose: pid %d flag %x mode %x\n",
	    p->p_p->ps_pid, flag, mode));

	rw_enter_write(&sc->sc_lock);
	switch (APMDEV(dev)) {
	case APMDEV_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APMDEV_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	rw_exit_write(&sc->sc_lock);
	return 0;
}

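/*
 * ioctl interface used by apmd(8): suspend/standby requests are queued
 * for the polling thread, APM_IOC_GETPOWER translates the BIOS power
 * status into a struct apm_power_info, and the PRN_CTL ioctls control
 * kernel power-message printing.
 */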
int
apmioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct apm_softc *sc;
	struct apmregs regs;
	int error = 0;

	/* apm0 only */
	if (!apm_cd.cd_ndevs || APMUNIT(dev) != 0 ||
	    !(sc = apm_cd.cd_devs[APMUNIT(dev)]))
		return ENXIO;

	rw_enter_write(&sc->sc_lock);
	switch (cmd) {
		/* some ioctl names from linux */
	case APM_IOC_STANDBY:
		if ((flag & FWRITE) == 0)
			error = EBADF;
		else
			apm_userstandbys++;
		break;
	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0)
			error = EBADF;
		else
			apm_suspends++;
		break;
	case APM_IOC_PRN_CTL:
		if ((flag & FWRITE) == 0)
			error = EBADF;
		else {
			int flag = *(int *)data;
			DPRINTF(( "APM_IOC_PRN_CTL: %d\n", flag ));
			switch (flag) {
			case APM_PRINT_ON:	/* enable printing */
				sc->sc_flags &= ~SCFLAG_PRINT;
				break;
			case APM_PRINT_OFF:	/* disable printing */
				sc->sc_flags &= ~SCFLAG_PRINT;
				sc->sc_flags |= SCFLAG_NOPRINT;
				break;
			case APM_PRINT_PCT:	/* disable some printing */
				sc->sc_flags &= ~SCFLAG_PRINT;
				sc->sc_flags |= SCFLAG_PCTPRINT;
				break;
			default:
				error = EINVAL;
				break;
			}
		}
		break;
	case APM_IOC_DEV_CTL:
		if ((flag & FWRITE) == 0)
			error = EBADF;
		else {
			struct apm_ctl *actl = (struct apm_ctl *)data;

			bzero(&regs, sizeof(regs));
			if (!apmcall(APM_GET_POWER_STATE, actl->dev, &regs))
				printf("%s: dev %04x state %04x\n",
				    sc->sc_dev.dv_xname, dev, regs.cx);

			error = apm_set_powstate(actl->dev, actl->mode);
		}
		break;
	case APM_IOC_GETPOWER:
		if (apm_get_powstat(&regs) == 0) {
			struct apm_power_info *powerp =
			    (struct apm_power_info *)data;

			bzero(powerp, sizeof(*powerp));
			if (BATT_LIFE(&regs) != APM_BATT_LIFE_UNKNOWN)
				powerp->battery_life = BATT_LIFE(&regs);
			powerp->ac_state = AC_STATE(&regs);
			switch (apm_minver) {
			case 0:
				if (!(BATT_FLAGS(&regs) & APM_BATT_FLAG_NOBATTERY))
					powerp->battery_state = BATT_STATE(&regs);
				break;
			case 1:
			default:
				if (BATT_FLAGS(&regs) & APM_BATT_FLAG_HIGH)
					powerp->battery_state = APM_BATT_HIGH;
				else if (BATT_FLAGS(&regs) & APM_BATT_FLAG_LOW)
					powerp->battery_state = APM_BATT_LOW;
				else if (BATT_FLAGS(&regs) & APM_BATT_FLAG_CRITICAL)
					powerp->battery_state = APM_BATT_CRITICAL;
				else if (BATT_FLAGS(&regs) & APM_BATT_FLAG_CHARGING)
					powerp->battery_state = APM_BATT_CHARGING;
				else if (BATT_FLAGS(&regs) & APM_BATT_FLAG_NOBATTERY)
					powerp->battery_state = APM_BATTERY_ABSENT;
				else
					powerp->battery_state = APM_BATT_UNKNOWN;
				if (BATT_REM_VALID(&regs)) {
					powerp->minutes_left = BATT_REMAINING(&regs);
					if (sc->be_batt)
						powerp->minutes_left =
						    swap16(powerp->minutes_left);
				}
			}
		} else {
			apm_perror("ioctl get power status", &regs);
			error = EIO;
		}
		break;
	case APM_IOC_STANDBY_REQ:
		if ((flag & FWRITE) == 0)
			error = EBADF;
		/* only fails if no one cares. apmd at least should */
		else if (apm_record_event(sc, APM_USER_STANDBY_REQ))
			error = EINVAL; /* ? */
		break;
	case APM_IOC_SUSPEND_REQ:
		if ((flag & FWRITE) == 0)
			error = EBADF;
		/* only fails if no one cares. apmd at least should */
		else if (apm_record_event(sc, APM_USER_SUSPEND_REQ))
			error = EINVAL; /* ? */
		break;
	default:
		error = ENOTTY;
	}

	rw_exit_write(&sc->sc_lock);
	return error;
}

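/*
 * kqueue support: EVFILT_READ notes on /dev/apm are activated by
 * apm_record_event() with the event type and a running index encoded
 * via APM_EVENT_COMPOSE().
 */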
void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = (struct apm_softc *)kn->kn_hook;

	rw_enter_write(&sc->sc_lock);
	klist_remove_locked(&sc->sc_note, kn);
	rw_exit_write(&sc->sc_lock);
}

int
filt_apmread(struct knote *kn, long hint)
{
	/* XXX weird kqueue_scan() semantics */
	if (hint && !kn->kn_data)
		kn->kn_data = (int)hint;
	return (1);
}

int
apmkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc;

	/* apm0 only */
	if (!apm_cd.cd_ndevs || APMUNIT(dev) != 0 ||
	    !(sc = apm_cd.cd_devs[APMUNIT(dev)]))
		return ENXIO;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &apmread_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)sc;

	rw_enter_write(&sc->sc_lock);
	klist_insert_locked(&sc->sc_note, kn);
	rw_exit_write(&sc->sc_lock);
	return (0);
}