1 /* $NetBSD: apmdev.c,v 1.21 2009/04/03 02:08:38 uwe Exp $ */ 2 3 /*- 4 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by John Kohl and Christopher G. Demetriou. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
 */
/*
 * Machine-independent APM (Advanced Power Management) pseudo-device.
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apmdev.c,v 1.21 2009/04/03 02:08:38 uwe Exp $");

#ifdef _KERNEL_OPT
#include "opt_apmdev.h"
#endif

/* Reject the obsolete option spelling at build time. */
#ifdef APM_NOIDLE
#error APM_NOIDLE option deprecated; use APM_NO_IDLE instead
#endif

/* A generic DEBUG kernel implies APM debugging as well. */
#if defined(DEBUG) && !defined(APMDEBUG)
#define APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/hpc/apm/apmvar.h>

#include <machine/stdarg.h>

#ifdef APMDEBUG
/*
 * Debug printf, gated on flag bits in the run-time tunable "apmdebug"
 * (APMDEBUG_* bits are defined in apmvar.h).  "x" is a parenthesized
 * printf argument list, e.g. DPRINTF(f, ("msg %d\n", v)).
 */
#define DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)


#ifdef APMDEBUG_VALUE
int apmdebug = APMDEBUG_VALUE;
#else
int apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define DPRINTF(f, x)		/**/
#endif /* APMDEBUG */

/* Size of the per-softc event ring buffer. */
#define APM_NEVENTS 16

/*
 * Per-instance driver state.
 */
struct apm_softc {
	device_t sc_dev;
	struct selinfo sc_rsel;		/* readers waiting in select/poll/kevent */
	struct selinfo sc_xsel;		/* initialized but not otherwise used in this file */
	int	sc_flags;		/* SCFLAG_* open-mode bits */
	int	sc_event_count;		/* events currently queued in sc_event_list */
	int	sc_event_ptr;		/* next slot to be written in the ring */
	int	sc_power_state;		/* PWR_RESUME / PWR_SUSPEND / PWR_STANDBY */
	lwp_t	*sc_thread;		/* periodic event-polling kernel thread */
	kmutex_t sc_lock;		/* serializes all thread-context entry (see below) */
	struct apm_event_info sc_event_list[APM_NEVENTS];	/* event ring buffer */
	struct apm_accessops *sc_ops;	/* machine-dependent backend operations */
	int	sc_vers;		/* APM version word from attach args */
	int	sc_detail;		/* detail flags from attach args */
	void	*sc_cookie;		/* opaque cookie passed back to sc_ops */
};
/* sc_flags bits: which minor-device flavors currently hold the device open. */
#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

/*
 * Minor number encoding: low nibble selects the device flavor,
 * high nibble the unit.
 * NOTE(review): APMUNIT() yields minor&0xf0 without shifting right,
 * so only unit 0 can match a configured device instance — presumably
 * intentional/historical; confirm before changing.
 */
#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APM(dev)	(minor(dev)&0x0f)
#define APM_NORMAL	0	/* read-only event consumer (/dev/apm) */
#define APM_CTL		8	/* exclusive read/write controller (/dev/apmctl) */

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This is both the APM thread itself, as well as
 * user context.
 */
#define APM_LOCK(apmsc)						\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define APM_UNLOCK(apmsc)					\
	(void) mutex_exit(&(apmsc)->sc_lock)

/* in real dev/apm/apmvar.h */
static int	apm_match(void);
static void	apm_attach(struct apm_softc *);
static const char *apm_strerror(int);

static void	apmdevattach(device_t, device_t, void *);
static int	apmdevmatch(device_t, cfdata_t, void *);

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *);
static void	apm_standby(struct apm_softc *);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

CFATTACH_DECL_NEW(apmdev, sizeof(struct apm_softc),
    apmdevmatch, apmdevattach, NULL, NULL);

extern struct cfdriver apmdev_cd;

dev_type_open(apmdevopen);
dev_type_close(apmdevclose);
dev_type_ioctl(apmdevioctl);
dev_type_poll(apmdevpoll);
dev_type_kqfilter(apmdevkqfilter);

/* Character-device entry points; no read/write/mmap support. */
const struct cdevsw apmdev_cdevsw = {
	apmdevopen, apmdevclose, noread, nowrite, apmdevioctl,
	nostop, notty, apmdevpoll, nommap, apmdevkqfilter, D_OTHER
};

/* configurable variables */
int	apm_bogus_bios = 0;
#ifdef APM_DISABLE
int	apm_enabled = 0;
#else
int	apm_enabled = 1;
#endif
#ifdef APM_NO_IDLE
int	apm_do_idle = 0;
#else
int	apm_do_idle = 1;
#endif
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY 177 int apm_v11_enabled = 0; 178 #else 179 int apm_v11_enabled = 1; 180 #endif 181 #ifdef APM_NO_V12 182 int apm_v12_enabled = 0; 183 #else 184 int apm_v12_enabled = 1; 185 #endif 186 187 /* variables used during operation (XXX cgd) */ 188 u_char apm_majver, apm_minver; 189 int apm_inited; 190 int apm_standbys, apm_userstandbys, apm_suspends, apm_battlow; 191 int apm_damn_fool_bios, apm_op_inprog; 192 int apm_evindex; 193 194 static int apm_spl; /* saved spl while suspended */ 195 196 const char * 197 apm_strerror(int code) 198 { 199 switch (code) { 200 case APM_ERR_PM_DISABLED: 201 return ("power management disabled"); 202 case APM_ERR_REALALREADY: 203 return ("real mode interface already connected"); 204 case APM_ERR_NOTCONN: 205 return ("interface not connected"); 206 case APM_ERR_16ALREADY: 207 return ("16-bit interface already connected"); 208 case APM_ERR_16NOTSUPP: 209 return ("16-bit interface not supported"); 210 case APM_ERR_32ALREADY: 211 return ("32-bit interface already connected"); 212 case APM_ERR_32NOTSUPP: 213 return ("32-bit interface not supported"); 214 case APM_ERR_UNRECOG_DEV: 215 return ("unrecognized device ID"); 216 case APM_ERR_ERANGE: 217 return ("parameter out of range"); 218 case APM_ERR_NOTENGAGED: 219 return ("interface not engaged"); 220 case APM_ERR_UNABLE: 221 return ("unable to enter requested state"); 222 case APM_ERR_NOEVENTS: 223 return ("no pending events"); 224 case APM_ERR_NOT_PRESENT: 225 return ("no APM present"); 226 default: 227 return ("unknown error code"); 228 } 229 } 230 231 static void 232 apm_perror(const char *str, int errinfo, ...) 
/* XXX cgd */ 233 { 234 va_list ap; 235 236 printf("APM "); 237 238 va_start(ap, errinfo); 239 vprintf(str, ap); /* XXX cgd */ 240 va_end(ap); 241 242 printf(": %s\n", apm_strerror(errinfo)); 243 } 244 245 #ifdef APM_POWER_PRINT 246 static void 247 apm_power_print(struct apm_softc *sc, struct apm_power_info *pi) 248 { 249 250 if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) { 251 aprint_normal_dev(sc->sc_dev, 252 "battery life expectancy: %d%%\n", 253 pi->battery_life); 254 } 255 aprint_normal_dev(sc->sc_dev, "A/C state: "); 256 switch (pi->ac_state) { 257 case APM_AC_OFF: 258 printf("off\n"); 259 break; 260 case APM_AC_ON: 261 printf("on\n"); 262 break; 263 case APM_AC_BACKUP: 264 printf("backup power\n"); 265 break; 266 default: 267 case APM_AC_UNKNOWN: 268 printf("unknown\n"); 269 break; 270 } 271 aprint_normal_dev(sc->sc_dev, "battery charge state:"); 272 if (apm_minver == 0) 273 switch (pi->battery_state) { 274 case APM_BATT_HIGH: 275 printf("high\n"); 276 break; 277 case APM_BATT_LOW: 278 printf("low\n"); 279 break; 280 case APM_BATT_CRITICAL: 281 printf("critical\n"); 282 break; 283 case APM_BATT_CHARGING: 284 printf("charging\n"); 285 break; 286 case APM_BATT_UNKNOWN: 287 printf("unknown\n"); 288 break; 289 default: 290 printf("undecoded state %x\n", pi->battery_state); 291 break; 292 } 293 else if (apm_minver >= 1) { 294 if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY) 295 printf(" no battery"); 296 else { 297 if (pi->battery_flags & APM_BATT_FLAG_HIGH) 298 printf(" high"); 299 if (pi->battery_flags & APM_BATT_FLAG_LOW) 300 printf(" low"); 301 if (pi->battery_flags & APM_BATT_FLAG_CRITICAL) 302 printf(" critical"); 303 if (pi->battery_flags & APM_BATT_FLAG_CHARGING) 304 printf(" charging"); 305 } 306 printf("\n"); 307 if (pi->minutes_valid) { 308 aprint_normal_dev(sc->sc_dev, "estimated "); 309 if (pi->minutes_left / 60) 310 printf("%dh ", pi->minutes_left / 60); 311 printf("%dm\n", pi->minutes_left % 60); 312 } 313 } 314 return; 315 } 316 #endif 317 
/*
 * Put the system into the suspended state.  Runs the "soft" power
 * hooks at normal IPL, pauses briefly (hz/2 — presumably to let hook
 * side effects and console output settle; TODO confirm), then raises
 * the IPL (saved in apm_spl for apm_resume() to restore), runs the
 * PWR_SUSPEND hooks and asks the backend to suspend.  If the backend
 * refuses, back out via apm_resume().
 */
static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	dopowerhooks(PWR_SOFTSUSPEND);
	(void) tsleep(sc, PWAIT, "apmsuspend", hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_SUSPEND);

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
}

/*
 * Put the system into standby.  Same sequence and backout strategy as
 * apm_suspend(), but with the STANDBY hook/state codes.
 */
static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	dopowerhooks(PWR_SOFTSTANDBY);
	(void) tsleep(sc, PWAIT, "apmstandby", hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_STANDBY);

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
}

/*
 * Return to the running state after a suspend/standby (or back out of a
 * failed attempt).  Re-reads the time of day, runs the PWR_RESUME hooks
 * at the still-raised IPL saved by apm_suspend()/apm_standby(), restores
 * that IPL, runs the soft-resume hooks, and queues the triggering event
 * for userland.  event_info is currently unused here.
 */
static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{

	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#if 0 /* XXX: def TIME_FREQ */
	/*
	 * Some system requires its clock to be initialized after hybernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	dopowerhooks(PWR_RESUME);

	splx(apm_spl);

	dopowerhooks(PWR_SOFTRESUME);

	apm_record_event(sc, event_type);
}

/*
 * Queue an event in the ring buffer for userland to read and wake any
 * select/poll/kevent waiters.
 *
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;		/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;	/* monotonically increasing sequence number */
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

/*
 * Dispatch a single BIOS event.  Standby/suspend *requests* are not
 * acted on immediately: they bump the apm_standbys/apm_suspends/
 * apm_userstandbys counters and tell the BIOS the request is in
 * progress; apm_periodic_check() performs the actual transition.
 * Resume-class events go straight to apm_resume().
 */
static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_standbys || apm_suspends) {
			/* a previous request is still pending */
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		/* critical: suspend right now, do not wait for userland */
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			/*
			 * NOTE(review): the results of both calls are
			 * discarded — presumably this just refreshes the
			 * BIOS-side state; confirm before relying on it.
			 */
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, &pi);
		}
		break;

	default:
		/* decode the event-class byte for the log message */
		switch (event_code >> 8) {
		case 0:
			code = "reserved system";
			break;
		case 1:
			code = "reserved device";
			break;
		case 2:
			code = "OEM defined";
			break;
		default:
			code = "reserved";
			break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

/*
 * One polling pass: keep the BIOS pacified if a request is pending,
 * drain and dispatch all queued BIOS events, then perform any
 * suspend/standby that the event handlers accumulated.  All the
 * request counters are reset at the end of each pass.
 */
static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;


	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	/*
	 * NOTE(review): if the loop exits because apm_damn_fool_bios was
	 * set while the last aa_get_event returned 0, error is 0 here and
	 * a spurious "get event" complaint is printed — confirm intended.
	 */
	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

/*
 * Negotiate the APM version to operate at, clamped by the
 * apm_v11_enabled/apm_v12_enabled knobs, and print the result plus
 * detail flags.  Also clears apm_do_idle unless the BIOS reports that
 * idling the CPU slows the clock (APM_IDLE_SLOWS).
 */
static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		/* fall back to 1.0 for anything else */
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
	if (sc->sc_detail & APM_IDLE_SLOWS) {
#ifdef DIAGNOSTIC
		/* not relevant often */
		aprint_normal(" (slowidle)");
#endif
		/* leave apm_do_idle at its user-configured setting */
	} else
		apm_do_idle = 0;
#ifdef DIAGNOSTIC
	if (sc->sc_detail & APM_BIOS_PM_DISABLED)
		aprint_normal(" (BIOS mgmt disabled)");
	if (sc->sc_detail & APM_BIOS_PM_DISENGAGED)
		aprint_normal(" (BIOS managing devices)");
#endif
}

/* Autoconf match: defer to apm_match() (first caller wins). */
static int
apmdevmatch(device_t parent, cfdata_t match, void *aux)
{

	return apm_match();
}

/*
 * Autoconf attach: copy the backend's version/detail word, ops vector
 * and cookie out of the attach args, then do the real work in
 * apm_attach().
 */
static void
apmdevattach(device_t parent, device_t self, void *aux)
{
	struct apm_softc *sc;
	struct apmdev_attach_args *aaa = aux;

	sc = device_private(self);
	sc->sc_dev = self;

	sc->sc_detail = aaa->apm_detail;
	sc->sc_vers = aaa->apm_detail & 0xffff;		/* XXX: magic */

	sc->sc_ops = aaa->accessops;
	sc->sc_cookie = aaa->accesscookie;

	apm_attach(sc);
}

/*
 * Print function (for parent devices).
 */
int
apmprint(void *aux, const char *pnp)
{
	if (pnp)
		aprint_normal("apm at %s", pnp);

	return (UNCONF);
}

/* Match at most one apm instance: true on the first call only. */
int
apm_match(void)
{
	static int got;
	return !got++;
}

/*
 * Finish attaching: pick the version, enable power management in the
 * BIOS, report initial power status, initialize the lock and selinfo
 * state, run one polling pass, and start the event-polling kernel
 * thread.  If thread creation fails, the backend is disconnected and
 * kernel APM support is left disabled.
 */
void
apm_attach(struct apm_softc *sc)
{
	struct apm_power_info pinfo;
	u_int numbatts, capflags;
	int error;

	aprint_naive("\n");
	aprint_normal(": ");

	/* Cap the enabled-version knobs to what the BIOS actually offers. */
	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, &pinfo);
	if (error == 0) {
#ifdef APM_POWER_PRINT
		apm_power_print(sc, &pinfo);
#endif
	} else
		apm_perror("get power status", error);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}
}

/*
 * Event-polling thread body: run apm_periodic_check() under the softc
 * lock roughly every 8/7 of a second, forever.
 * NOTE(review): declared static in the prototype above but defined
 * without the keyword — linkage is still internal; cosmetic mismatch.
 */
void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
		(void) tsleep(apmsc, PWAIT, "apmev", (8 * hz) / 7);
	}
}

/*
 * open(2): APM_CTL minor requires FWRITE and is exclusive; APM_NORMAL
 * minor requires read-only access.  Fails with ENXIO until apm_attach()
 * has completed version negotiation (apm_inited).
 */
int
apmdevopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

/*
 * close(2): drop the open-mode flag for this minor; once nobody holds
 * the device open, discard any queued events.
 * NOTE(review): unlike apmdevopen(), sc is not checked for NULL here
 * (nor in ioctl/poll/kqfilter) — presumably safe because close can only
 * follow a successful open; confirm.
 */
int
apmdevclose(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

/*
 * ioctl(2) handler:
 *   APM_IOC_STANDBY / APM_IOC_SUSPEND - request a transition (performed
 *     later by apm_periodic_check()); require the fd open for writing.
 *   APM_IOC_NEXTEVENT - dequeue the oldest event, EAGAIN if none.
 *   [O]APM_IOC_GETPOWER - fetch power status; on APM >= 1.1 synthesize
 *     the legacy battery_state field from the flag word.
 */
int
apmdevioctl(dev_t dev, u_long cmd, void *data, int flag,
    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			/* index of the oldest queued event in the ring */
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

	case OAPM_IOC_GETPOWER:
	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			/* derive the 1.0-style state from the 1.1 flag word */
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}

/*
 * poll(2): readable when at least one event is queued; otherwise record
 * the waiter on sc_rsel for selnotify() in apm_record_event().
 */
int
apmdevpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

/* kqueue detach: unhook the knote from the read-select klist. */
static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	APM_UNLOCK(sc);
}

/* kqueue event test: active when events are queued; reports the count. */
static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
	{ 1, NULL, filt_apmrdetach, filt_apmread };

/* kqueue(2) attach: only EVFILT_READ is supported. */
int
apmdevkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rsel.sel_klist;
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	APM_UNLOCK(sc);

	return (0);
}