xref: /netbsd-src/sys/dev/apm/apm.c (revision 3816d47b2c42fcd6e549e3407f842a5b1a1d23ad)
/*	$NetBSD: apm.c,v 1.25 2009/11/23 02:13:45 rmind Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apm.c,v 1.25 2009/11/23 02:13:45 rmind Exp $");

#include "opt_apm.h"

#ifdef APM_NOIDLE
#error APM_NOIDLE option deprecated; use APM_NO_IDLE instead
#endif

#if defined(DEBUG) && !defined(APMDEBUG)
#define	APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/apm/apmvar.h>

#include <machine/stdarg.h>

#ifdef APMDEBUG
#define DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)


#ifdef APMDEBUG_VALUE
int	apmdebug = APMDEBUG_VALUE;
#else
int	apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define	DPRINTF(f, x)		/**/
#endif /* APMDEBUG */

#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APM(dev)	(minor(dev)&0x0f)
#define APM_NORMAL	0
#define APM_CTL	8
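/*
 * The low nibble of the minor number selects the flavour of the device:
 * APM_NORMAL is the read-only event/status device (typically /dev/apm)
 * and APM_CTL is the control device opened for writing (typically
 * /dev/apmctl).  The node names are the usual convention, not something
 * this file enforces.
 */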

/*
 * A brief note on the locking protocol: it is deliberately simple; an
 * exclusive lock is asserted whenever thread context enters the APM
 * module, both by the APM kernel thread itself and by user context
 * coming in through the character device.
 */
#define	APM_LOCK(apmsc)						\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define	APM_UNLOCK(apmsc)						\
	(void) mutex_exit(&(apmsc)->sc_lock)
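/*
 * Typical usage, as in apm_thread() and the cdevsw entry points below:
 *
 *	APM_LOCK(sc);
 *	... touch softc state, run apm_periodic_check(), etc. ...
 *	APM_UNLOCK(sc);
 */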

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *);
static void	apm_standby(struct apm_softc *);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

extern struct cfdriver apm_cd;

dev_type_open(apmopen);
dev_type_close(apmclose);
dev_type_ioctl(apmioctl);
dev_type_poll(apmpoll);
dev_type_kqfilter(apmkqfilter);

const struct cdevsw apm_cdevsw = {
	apmopen, apmclose, noread, nowrite, apmioctl,
	nostop, notty, apmpoll, nommap, apmkqfilter, D_OTHER,
};
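/*
 * Note that read(2) and write(2) are not supported (noread/nowrite):
 * events are consumed with the APM_IOC_NEXTEVENT ioctl, and their
 * arrival is announced through poll(2) and kqueue(2).
 */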

/* configurable variables */
int	apm_bogus_bios = 0;
#ifdef APM_DISABLE
int	apm_enabled = 0;
#else
int	apm_enabled = 1;
#endif
#ifdef APM_NO_IDLE
int	apm_do_idle = 0;
#else
int	apm_do_idle = 1;
#endif
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int	apm_v11_enabled = 0;
#else
int	apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int	apm_v12_enabled = 0;
#else
int	apm_v12_enabled = 1;
#endif
#ifdef APM_FORCE_64K_SEGMENTS
int	apm_force_64k_segments = 1;
#else
int	apm_force_64k_segments = 0;
#endif
#ifdef APM_ALLOW_BOGUS_SEGMENTS
int	apm_allow_bogus_segments = 1;
#else
int	apm_allow_bogus_segments = 0;
#endif

/* variables used during operation (XXX cgd) */
u_char	apm_majver, apm_minver;
int	apm_inited;
int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int	apm_damn_fool_bios, apm_op_inprog;
int	apm_evindex;

static int apm_spl;		/* saved spl while suspended */

const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}

static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap);			/* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		aprint_normal_dev(sc->sc_dev,
		    "battery life expectancy: %d%%\n",
		    pi->battery_life);
	}
	aprint_normal_dev(sc->sc_dev, "A/C state: ");
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	aprint_normal_dev(sc->sc_dev, "battery charge state:");
	if (apm_minver == 0)
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	else if (apm_minver >= 1) {
		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
			printf(" no battery");
		else {
			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
				printf(" high");
			if (pi->battery_flags & APM_BATT_FLAG_LOW)
				printf(" low");
			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
				printf(" critical");
			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
				printf(" charging");
		}
		printf("\n");
		if (pi->minutes_valid) {
			aprint_normal_dev(sc->sc_dev, "estimated ");
			if (pi->minutes_left / 60)
				printf("%dh ", pi->minutes_left / 60);
			printf("%dm\n", pi->minutes_left % 60);
		}
	}
	return;
}
#endif

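/*
 * apm_suspend() drives the transition into the APM "suspend" state:
 * unless the back-end sets APM_F_DONT_RUN_HOOKS it first runs the PMF
 * suspend hooks and raises the spl, then asks the BIOS to suspend all
 * devices; if that call fails, it resumes immediately.  apm_standby()
 * below follows the same pattern for the "standby" state.
 */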
static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_Q_NONE);
		apm_spl = splhigh();
	}

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
}

static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_Q_NONE);
		apm_spl = splhigh();
	}
	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
}

static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{

	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#ifdef TIMER_FREQ
	/*
	 * Some systems require the clock to be reinitialized after
	 * hibernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		splx(apm_spl);
		pmf_system_resume(PMF_Q_NONE);
	}

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
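/*
 * Recorded events go into a fixed ring of APM_NEVENTS entries
 * (sc_event_list/sc_event_ptr/sc_event_count); once the ring is full,
 * further events are simply not queued.  selnotify() wakes any
 * poll(2)/kqueue(2) waiters for each event that is recorded.
 */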
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;			/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
		}
		break;

	default:
		switch (event_code >> 8) {
			case 0:
				code = "reserved system";
				break;
			case 1:
				code = "reserved device";
				break;
			case 2:
				code = "OEM defined";
				break;
			default:
				code = "reserved";
				break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

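/*
 * apm_periodic_check() is the heart of the driver: it reassures the
 * BIOS while a request is pending, drains all queued BIOS events
 * through apm_event_handle(), and then acts on any suspend/standby
 * requests accumulated while those events were processed.
 */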
static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;


	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
	if (sc->sc_detail & APM_IDLE_SLOWS) {
#ifdef DIAGNOSTIC
		/* not relevant often */
		aprint_normal(" (slowidle)");
#endif
		/* leave apm_do_idle at its user-configured setting */
	} else
		apm_do_idle = 0;
#ifdef DIAGNOSTIC
	if (sc->sc_detail & APM_BIOS_PM_DISABLED)
		aprint_normal(" (BIOS mgmt disabled)");
	if (sc->sc_detail & APM_BIOS_PM_DISENGAGED)
		aprint_normal(" (BIOS managing devices)");
#endif
}

int
apm_match(void)
{
	static int got;
	return !got++;
}

void
apm_attach(struct apm_softc *sc)
{
	u_int numbatts, capflags;

	aprint_normal(": ");

	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}

	if (!pmf_device_register(sc->sc_dev, NULL, NULL))
		aprint_error_dev(sc->sc_dev, "couldn't establish power handler\n");
}

void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
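		/*
		 * Sleep until the next poll; (8 * hz) / 7 ticks is a
		 * little over one second, so pending BIOS events are
		 * noticed reasonably promptly without burning CPU.
		 */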
		(void) tsleep(apmsc, PWAIT, "apmev", (8 * hz) / 7);
	}
}

int
apmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmclose(dev_t dev, int flag, int mode,
	struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

int
apmioctl(dev_t dev, u_long cmd, void *data, int flag,
	struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;
	struct apm_ctl *actl;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_DEV_CTL:
		actl = (struct apm_ctl *)data;
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
#if 0
		apm_get_powstate(actl->dev); /* XXX */
#endif
		error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, actl->dev,
		    actl->mode);
		apm_suspends++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

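	/*
	 * OAPM_IOC_GETPOWER appears to be the older spelling of this
	 * ioctl, kept so existing binaries continue to work; both are
	 * served identically here.
	 */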
	case OAPM_IOC_GETPOWER:
	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}
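/*
 * A minimal userland sketch of the read-only interface (hypothetical
 * example, assuming the usual /dev/apm device node and the ioctl
 * definitions exported by the machine-dependent APM headers):
 *
 *	int fd = open("/dev/apm", O_RDONLY);
 *	struct apm_power_info pi;
 *
 *	if (fd != -1 && ioctl(fd, APM_IOC_GETPOWER, &pi) == 0)
 *		printf("battery: %d%%\n", pi.battery_life);
 */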

int
apmpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
	{ 1, NULL, filt_apmrdetach, filt_apmread };

int
apmkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rsel.sel_klist;
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	APM_UNLOCK(sc);

	return (0);
}
939