/* $NetBSD: apm.c,v 1.22 2008/06/12 21:47:46 cegger Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apm.c,v 1.22 2008/06/12 21:47:46 cegger Exp $");

#include "opt_apm.h"

#ifdef APM_NOIDLE
#error APM_NOIDLE option deprecated; use APM_NO_IDLE instead
#endif

#if defined(DEBUG) && !defined(APMDEBUG)
#define	APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/apm/apmvar.h>

#include <machine/stdarg.h>

#ifdef APMDEBUG
#define	DPRINTF(f, x)	do { if (apmdebug & (f)) printf x; } while (0)

#ifdef APMDEBUG_VALUE
int	apmdebug = APMDEBUG_VALUE;
#else
int	apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define	DPRINTF(f, x)	/**/
#endif /* APMDEBUG */

#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)
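/*
 * The device minor encodes both the unit (upper bits, APMUNIT())
 * and the interface flavour (low nibble, APM()): APM_NORMAL for
 * plain event delivery, APM_CTL for the control device.
 */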
#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APM(dev)	(minor(dev)&0x0f)
#define	APM_NORMAL	0
#define	APM_CTL		8

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This is both the APM thread itself, as well as
 * user context.
 */
#define	APM_LOCK(apmsc)		\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define	APM_UNLOCK(apmsc)	\
	(void) mutex_exit(&(apmsc)->sc_lock)

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *);
static void	apm_standby(struct apm_softc *);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

extern struct cfdriver apm_cd;

dev_type_open(apmopen);
dev_type_close(apmclose);
dev_type_ioctl(apmioctl);
dev_type_poll(apmpoll);
dev_type_kqfilter(apmkqfilter);

const struct cdevsw apm_cdevsw = {
	apmopen, apmclose, noread, nowrite, apmioctl,
	nostop, notty, apmpoll, nommap, apmkqfilter, D_OTHER,
};

/* configurable variables */
int	apm_bogus_bios = 0;
#ifdef APM_DISABLE
int	apm_enabled = 0;
#else
int	apm_enabled = 1;
#endif
#ifdef APM_NO_IDLE
int	apm_do_idle = 0;
#else
int	apm_do_idle = 1;
#endif
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int	apm_v11_enabled = 0;
#else
int	apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int	apm_v12_enabled = 0;
#else
int	apm_v12_enabled = 1;
#endif
#ifdef APM_FORCE_64K_SEGMENTS
int	apm_force_64k_segments = 1;
#else
int	apm_force_64k_segments = 0;
#endif
#ifdef APM_ALLOW_BOGUS_SEGMENTS
int	apm_allow_bogus_segments = 1;
#else
int	apm_allow_bogus_segments = 0;
#endif

/* variables used during operation (XXX cgd) */
u_char	apm_majver, apm_minver;
int	apm_inited;
int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int	apm_damn_fool_bios, apm_op_inprog;
int	apm_evindex;

static int apm_spl;		/* saved spl while suspended */

const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}
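/*
 * Print an APM error message: the caller supplies a printf-style
 * description of the operation that failed, and the BIOS error code
 * is decoded via apm_strerror().
 */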
static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap);		/* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		aprint_normal_dev(sc->sc_dev,
		    "battery life expectancy: %d%%\n",
		    pi->battery_life);
	}
	aprint_normal_dev(sc->sc_dev, "A/C state: ");
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	aprint_normal_dev(sc->sc_dev, "battery charge state:");
	if (apm_minver == 0)
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	else if (apm_minver >= 1) {
		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
			printf(" no battery");
		else {
			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
				printf(" high");
			if (pi->battery_flags & APM_BATT_FLAG_LOW)
				printf(" low");
			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
				printf(" critical");
			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
				printf(" charging");
		}
		printf("\n");
		if (pi->minutes_valid) {
			aprint_normal_dev(sc->sc_dev, "estimated ");
			if (pi->minutes_left / 60)
				printf("%dh ", pi->minutes_left / 60);
			printf("%dm\n", pi->minutes_left % 60);
		}
	}
	return;
}
#endif

static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_F_NONE);
		apm_spl = splhigh();
	}

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
}

static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_F_NONE);
		apm_spl = splhigh();
	}
	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
}
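/*
 * Return to the running state after a suspend or standby: restore
 * the clock, drop the spl raised at suspend time, run the PMF resume
 * hooks and record the event for any listening user process.
 */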
static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{

	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#ifdef TIMER_FREQ
	/*
	 * Some systems require the clock to be reinitialized after
	 * hibernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		splx(apm_spl);
		pmf_system_resume(PMF_F_NONE);
	}

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;		/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

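	/*
	 * Power status changed (AC connected/disconnected, battery
	 * update): no state transition is needed, just note the event.
	 */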
	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
		}
		break;

	default:
		switch (event_code >> 8) {
		case 0:
			code = "reserved system";
			break;
		case 1:
			code = "reserved device";
			break;
		case 2:
			code = "OEM defined";
			break;
		default:
			code = "reserved";
			break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;

	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}
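/*
 * Decide which APM specification version to use, honouring the
 * apm_v11_enabled and apm_v12_enabled configuration knobs, and
 * print the selected version along with notable BIOS detail flags.
 */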
static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
	if (sc->sc_detail & APM_IDLE_SLOWS) {
#ifdef DIAGNOSTIC
		/* not relevant often */
		aprint_normal(" (slowidle)");
#endif
		/* leave apm_do_idle at its user-configured setting */
	} else
		apm_do_idle = 0;
#ifdef DIAGNOSTIC
	if (sc->sc_detail & APM_BIOS_PM_DISABLED)
		aprint_normal(" (BIOS mgmt disabled)");
	if (sc->sc_detail & APM_BIOS_PM_DISENGAGED)
		aprint_normal(" (BIOS managing devices)");
#endif
}

int
apm_match(void)
{
	static int got;
	return !got++;
}

void
apm_attach(struct apm_softc *sc)
{
	u_int numbatts, capflags;

	aprint_normal(": ");

	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}

	if (!pmf_device_register(sc->sc_dev, NULL, NULL))
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
}

void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
		(void) tsleep(apmsc, PWAIT, "apmev", (8 * hz) / 7);
	}
}
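/*
 * Character device interface.  The APM_NORMAL minor is opened
 * read-only by processes that want to receive events; the APM_CTL
 * minor must be opened for writing, allows only one opener at a
 * time, and makes that opener responsible for acting on power
 * events (see apm_record_event()).
 */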
int
apmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

int
apmioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;
	struct apm_ctl *actl;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_DEV_CTL:
		actl = (struct apm_ctl *)data;
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
#if 0
		apm_get_powstate(actl->dev); /* XXX */
#endif
		error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    actl->dev, actl->mode);
		apm_suspends++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
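		/*
		 * APM 1.1 and later report battery status as flag bits;
		 * derive the corresponding 1.0-style battery_state code
		 * from them.
		 */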
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
	{ 1, NULL, filt_apmrdetach, filt_apmread };

int
apmkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rsel.sel_klist;
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	APM_UNLOCK(sc);

	return (0);
}