xref: /netbsd-src/sys/dev/hpc/apm/apmdev.c (revision 6874e511b743689cb8d27a5197ae92fe5e105411)
1 /*	$NetBSD: apmdev.c,v 1.13 2007/12/05 07:58:29 ad Exp $ */
2 
3 /*-
4  * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by John Kohl and Christopher G. Demetriou.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 /*
39  * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
40  */
41 
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: apmdev.c,v 1.13 2007/12/05 07:58:29 ad Exp $");
44 
45 #ifdef _KERNEL_OPT
46 #include "opt_apmdev.h"
47 #endif
48 
49 #ifdef APM_NOIDLE
50 #error APM_NOIDLE option deprecated; use APM_NO_IDLE instead
51 #endif
52 
53 #if defined(DEBUG) && !defined(APMDEBUG)
54 #define	APMDEBUG
55 #endif
56 
57 #include <sys/param.h>
58 #include <sys/systm.h>
59 #include <sys/signalvar.h>
60 #include <sys/kernel.h>
61 #include <sys/proc.h>
62 #include <sys/kthread.h>
63 #include <sys/mutex.h>
64 #include <sys/user.h>
65 #include <sys/malloc.h>
66 #include <sys/device.h>
67 #include <sys/fcntl.h>
68 #include <sys/ioctl.h>
69 #include <sys/select.h>
70 #include <sys/poll.h>
71 #include <sys/conf.h>
72 
73 #include <dev/hpc/apm/apmvar.h>
74 
75 #include <machine/stdarg.h>
76 
77 #if defined(APMDEBUG)
78 #define	DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)
79 
80 #define	APMDEBUG_INFO		0x01
81 #define	APMDEBUG_APMCALLS	0x02
82 #define	APMDEBUG_EVENTS		0x04
83 #define	APMDEBUG_PROBE		0x10
84 #define	APMDEBUG_ATTACH		0x40
85 #define	APMDEBUG_DEVICE		0x20
86 #define	APMDEBUG_ANOM		0x40
87 
88 #ifdef APMDEBUG_VALUE
89 int	apmdebug = APMDEBUG_VALUE;
90 #else
91 int	apmdebug = 0;
92 #endif
93 #else
94 #define	DPRINTF(f, x)		/**/
95 #endif
96 
97 #define APM_NEVENTS 16	/* size of the per-device event ring buffer */
98 
99 struct apm_softc {
100 	struct device sc_dev;
101 	struct selinfo sc_rsel;
102 	struct selinfo sc_xsel;
103 	int	sc_flags;
104 	int	event_count;
105 	int	event_ptr;
106 	int	sc_power_state;
107 	lwp_t	*sc_thread;
108 	kmutex_t sc_lock;
109 	struct apm_event_info event_list[APM_NEVENTS];
110 	struct apm_accessops *ops;
111 	void *cookie;
112 };
113 #define	SCFLAG_OREAD	0x0000001
114 #define	SCFLAG_OWRITE	0x0000002
115 #define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)
116 
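/*
 * The character device minor number encodes both the unit (upper
 * nibble) and the flavour of device (lower nibble): APMDEV_NORMAL is
 * the read-only event device, APMDEV_CTL the control device, which
 * must be opened for writing.
 */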
117 #define	APMUNIT(dev)	(minor(dev)&0xf0)
118 #define	APMDEV(dev)	(minor(dev)&0x0f)
119 #define APMDEV_NORMAL	0
120 #define APMDEV_CTL	8
121 
122 /*
123  * A brief note on the locking protocol: it's very simple; we
124  * assert an exclusive lock any time thread context enters the
125  * APM module.  This covers both the APM kernel thread itself and
126  * user context (the open, close, ioctl, poll and kqueue entry points).
127  */
128 #define	APM_LOCK(apmsc)							\
129 	(void) mutex_enter(&(apmsc)->sc_lock)
130 #define	APM_UNLOCK(apmsc)						\
131 	(void) mutex_exit(&(apmsc)->sc_lock)
132 
133 static void	apmattach(struct device *, struct device *, void *);
134 static int	apmmatch(struct device *, struct cfdata *, void *);
135 
136 static void	apm_event_handle(struct apm_softc *, u_int, u_int);
137 static void	apm_periodic_check(struct apm_softc *);
138 static void	apm_thread(void *);
139 static void	apm_perror(const char *, int, ...)
140 		    __attribute__((__format__(__printf__,1,3)));
141 #ifdef APM_POWER_PRINT
142 static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
143 #endif
144 static int	apm_record_event(struct apm_softc *, u_int);
145 static void	apm_set_ver(struct apm_softc *, u_long);
146 static void	apm_standby(struct apm_softc *);
147 static const char *apm_strerror(int);
148 static void	apm_suspend(struct apm_softc *);
149 static void	apm_resume(struct apm_softc *, u_int, u_int);
150 
151 CFATTACH_DECL(apmdev, sizeof(struct apm_softc),
152     apmmatch, apmattach, NULL, NULL);
153 
154 extern struct cfdriver apmdev_cd;
155 
156 dev_type_open(apmdevopen);
157 dev_type_close(apmdevclose);
158 dev_type_ioctl(apmdevioctl);
159 dev_type_poll(apmdevpoll);
160 dev_type_kqfilter(apmdevkqfilter);
161 
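/*
 * The apm character device has no read or write entry points; user
 * space retrieves queued events with the APM_IOC_NEXTEVENT ioctl and
 * waits for them via poll(2) or kqueue(2).
 */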
162 const struct cdevsw apmdev_cdevsw = {
163 	apmdevopen, apmdevclose, noread, nowrite, apmdevioctl,
164 	nostop, notty, apmdevpoll, nommap, apmdevkqfilter, D_OTHER
165 };
166 
167 /* configurable variables */
168 int	apm_bogus_bios = 0;
169 #ifdef APM_DISABLE
170 int	apm_enabled = 0;
171 #else
172 int	apm_enabled = 1;
173 #endif
174 #ifdef APM_NO_IDLE
175 int	apm_do_idle = 0;
176 #else
177 int	apm_do_idle = 1;
178 #endif
179 #ifdef APM_NO_STANDBY
180 int	apm_do_standby = 0;
181 #else
182 int	apm_do_standby = 1;
183 #endif
184 #ifdef APM_V10_ONLY
185 int	apm_v11_enabled = 0;
186 #else
187 int	apm_v11_enabled = 1;
188 #endif
189 #ifdef APM_NO_V12
190 int	apm_v12_enabled = 0;
191 #else
192 int	apm_v12_enabled = 1;
193 #endif
194 
195 /* variables used during operation (XXX cgd) */
196 u_char	apm_majver, apm_minver;
197 int	apm_inited;
198 int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
199 int	apm_damn_fool_bios, apm_op_inprog;
200 int	apm_evindex;
201 
202 static int apm_spl;		/* saved spl while suspended */
203 
204 static const char *
205 apm_strerror(int code)
206 {
207 	switch (code) {
208 	case APM_ERR_PM_DISABLED:
209 		return ("power management disabled");
210 	case APM_ERR_REALALREADY:
211 		return ("real mode interface already connected");
212 	case APM_ERR_NOTCONN:
213 		return ("interface not connected");
214 	case APM_ERR_16ALREADY:
215 		return ("16-bit interface already connected");
216 	case APM_ERR_16NOTSUPP:
217 		return ("16-bit interface not supported");
218 	case APM_ERR_32ALREADY:
219 		return ("32-bit interface already connected");
220 	case APM_ERR_32NOTSUPP:
221 		return ("32-bit interface not supported");
222 	case APM_ERR_UNRECOG_DEV:
223 		return ("unrecognized device ID");
224 	case APM_ERR_ERANGE:
225 		return ("parameter out of range");
226 	case APM_ERR_NOTENGAGED:
227 		return ("interface not engaged");
228 	case APM_ERR_UNABLE:
229 		return ("unable to enter requested state");
230 	case APM_ERR_NOEVENTS:
231 		return ("no pending events");
232 	case APM_ERR_NOT_PRESENT:
233 		return ("no APM present");
234 	default:
235 		return ("unknown error code");
236 	}
237 }
238 
239 static void
240 apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
241 {
242 	va_list ap;
243 
244 	printf("APM ");
245 
246 	va_start(ap, errinfo);
247 	vprintf(str, ap);			/* XXX cgd */
248 	va_end(ap);
249 
250 	printf(": %s\n", apm_strerror(errinfo));
251 }
252 
253 #ifdef APM_POWER_PRINT
254 static void
255 apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
256 {
257 
258 	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
259 		printf("%s: battery life expectancy: %d%%\n",
260 		    sc->sc_dev.dv_xname, pi->battery_life);
261 	}
262 	printf("%s: A/C state: ", sc->sc_dev.dv_xname);
263 	switch (pi->ac_state) {
264 	case APM_AC_OFF:
265 		printf("off\n");
266 		break;
267 	case APM_AC_ON:
268 		printf("on\n");
269 		break;
270 	case APM_AC_BACKUP:
271 		printf("backup power\n");
272 		break;
273 	default:
274 	case APM_AC_UNKNOWN:
275 		printf("unknown\n");
276 		break;
277 	}
278 	if (apm_majver == 1 && apm_minver == 0) {
279 		printf("%s: battery charge state:", sc->sc_dev.dv_xname);
280 		switch (pi->battery_state) {
281 		case APM_BATT_HIGH:
282 			printf("high\n");
283 			break;
284 		case APM_BATT_LOW:
285 			printf("low\n");
286 			break;
287 		case APM_BATT_CRITICAL:
288 			printf("critical\n");
289 			break;
290 		case APM_BATT_CHARGING:
291 			printf("charging\n");
292 			break;
293 		case APM_BATT_UNKNOWN:
294 			printf("unknown\n");
295 			break;
296 		default:
297 			printf("undecoded state %x\n", pi->battery_state);
298 			break;
299 		}
300 	} else {
301 		if (pi->battery_state&APM_BATT_FLAG_CHARGING)
302 			printf("charging ");
303 		/* decode the remaining battery state flags */
304 		if (pi->battery_state&APM_BATT_FLAG_UNKNOWN)
305 			printf("unknown\n");
306 		else if (pi->battery_state&APM_BATT_FLAG_CRITICAL)
307 			printf("critical\n");
308 		else if (pi->battery_state&APM_BATT_FLAG_LOW)
309 			printf("low\n");
310 		else if (pi->battery_state&APM_BATT_FLAG_HIGH)
311 			printf("high\n");
312 	}
313 	if (pi->minutes_left != 0) {
314 		printf("%s: estimated ", sc->sc_dev.dv_xname);
315 		printf("%dh %dm\n", pi->minutes_left / 60, pi->minutes_left % 60);
316 	}
317 	return;
318 }
319 #endif
320 
321 static void
322 apm_suspend(struct apm_softc *sc)
323 {
324 
325 	if (sc->sc_power_state == PWR_SUSPEND) {
326 #ifdef APMDEBUG
327 		printf("%s: apm_suspend: already suspended?\n",
328 		    sc->sc_dev.dv_xname);
329 #endif
330 		return;
331 	}
332 	sc->sc_power_state = PWR_SUSPEND;
333 
334 	dopowerhooks(PWR_SOFTSUSPEND);
335 	(void) tsleep(sc, PWAIT, "apmsuspend",  hz/2);
336 
337 	apm_spl = splhigh();
338 
339 	dopowerhooks(PWR_SUSPEND);
340 
341 	/* XXX cgd */
342 	(void)sc->ops->set_powstate(sc->cookie, APM_DEV_ALLDEVS, APM_SYS_SUSPEND);
343 }
344 
345 static void
346 apm_standby(struct apm_softc *sc)
347 {
348 
349 	if (sc->sc_power_state == PWR_STANDBY) {
350 #ifdef APMDEBUG
351 		printf("%s: apm_standby: already standing by?\n",
352 		    sc->sc_dev.dv_xname);
353 #endif
354 		return;
355 	}
356 	sc->sc_power_state = PWR_STANDBY;
357 
358 	dopowerhooks(PWR_SOFTSTANDBY);
359 	(void) tsleep(sc, PWAIT, "apmstandby",  hz/2);
360 
361 	apm_spl = splhigh();
362 
363 	dopowerhooks(PWR_STANDBY);
364 	/* XXX cgd */
365 	(void)sc->ops->set_powstate(sc->cookie, APM_DEV_ALLDEVS, APM_SYS_STANDBY);
366 }
367 
368 static void
369 apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
370 {
371 
372 	if (sc->sc_power_state == PWR_RESUME) {
373 #ifdef APMDEBUG
374 		printf("%s: apm_resume: already running?\n",
375 		    sc->sc_dev.dv_xname);
376 #endif
377 		return;
378 	}
379 	sc->sc_power_state = PWR_RESUME;
380 
381 	/*
382 	 * Some systems require the clock to be reinitialized after hibernation.
383 	 */
384 /* XXX
385 	initrtclock();
386 */
387 
388 	inittodr(time_second);
389 	dopowerhooks(PWR_RESUME);
390 
391 	splx(apm_spl);
392 
393 	dopowerhooks(PWR_SOFTRESUME);
394 
395 	apm_record_event(sc, event_type);
396 }
397 
398 /*
399  * Return 0 if a process with the control device open will notice and
400  * handle the event, or 1 if the kernel driver should do so itself.
401  */
402 static int
403 apm_record_event(struct apm_softc *sc, u_int event_type)
404 {
405 	struct apm_event_info *evp;
406 
407 	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
408 		return 1;		/* no user waiting */
409 	if (sc->event_count == APM_NEVENTS)
410 		return 1;			/* overflow */
411 	evp = &sc->event_list[sc->event_ptr];
412 	sc->event_count++;
413 	sc->event_ptr++;
414 	sc->event_ptr %= APM_NEVENTS;
415 	evp->type = event_type;
416 	evp->index = ++apm_evindex;
417 	selnotify(&sc->sc_rsel, 0);
418 	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
419 }
420 
421 static void
422 apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
423 {
424 	int error;
425 	const char *code;
426 	struct apm_power_info pi;
427 
428 	switch (event_code) {
429 	case APM_USER_STANDBY_REQ:
430 		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
431 		if (apm_do_standby) {
432 			if (apm_record_event(sc, event_code))
433 				apm_userstandbys++;
434 			apm_op_inprog++;
435 			(void)sc->ops->set_powstate(sc->cookie,
436 						    APM_DEV_ALLDEVS,
437 						    APM_LASTREQ_INPROG);
438 		} else {
439 			(void)sc->ops->set_powstate(sc->cookie,
440 						    APM_DEV_ALLDEVS,
441 						    APM_LASTREQ_REJECTED);
442 			/* in case BIOS hates being spurned */
443 			sc->ops->enable(sc->cookie, 1);
444 		}
445 		break;
446 
447 	case APM_STANDBY_REQ:
448 		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
449 		if (apm_standbys || apm_suspends) {
450 			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
451 			    ("damn fool BIOS did not wait for answer\n"));
452 			/* just give up the fight */
453 			apm_damn_fool_bios = 1;
454 		}
455 		if (apm_do_standby) {
456 			if (apm_record_event(sc, event_code))
457 				apm_standbys++;
458 			apm_op_inprog++;
459 			(void)sc->ops->set_powstate(sc->cookie,
460 						    APM_DEV_ALLDEVS,
461 						    APM_LASTREQ_INPROG);
462 		} else {
463 			(void)sc->ops->set_powstate(sc->cookie,
464 						    APM_DEV_ALLDEVS,
465 						    APM_LASTREQ_REJECTED);
466 			/* in case BIOS hates being spurned */
467 			sc->ops->enable(sc->cookie, 1);
468 		}
469 		break;
470 
471 	case APM_USER_SUSPEND_REQ:
472 		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
473 		if (apm_record_event(sc, event_code))
474 			apm_suspends++;
475 		apm_op_inprog++;
476 		(void)sc->ops->set_powstate(sc->cookie,
477 					    APM_DEV_ALLDEVS,
478 					    APM_LASTREQ_INPROG);
479 		break;
480 
481 	case APM_SUSPEND_REQ:
482 		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
483 		if (apm_standbys || apm_suspends) {
484 			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
485 			    ("damn fool BIOS did not wait for answer\n"));
486 			/* just give up the fight */
487 			apm_damn_fool_bios = 1;
488 		}
489 		if (apm_record_event(sc, event_code))
490 			apm_suspends++;
491 		apm_op_inprog++;
492 		(void)sc->ops->set_powstate(sc->cookie,
493 					    APM_DEV_ALLDEVS,
494 					    APM_LASTREQ_INPROG);
495 		break;
496 
497 	case APM_POWER_CHANGE:
498 		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
499 		error = sc->ops->get_powstat(sc->cookie, &pi);
500 #ifdef APM_POWER_PRINT
501 		/* only print if nobody is catching events. */
502 		if (error == 0 &&
503 		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
504 			apm_power_print(sc, &pi);
505 #endif
506 		apm_record_event(sc, event_code);
507 		break;
508 
509 	case APM_NORMAL_RESUME:
510 		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
511 		apm_resume(sc, event_code, event_info);
512 		break;
513 
514 	case APM_CRIT_RESUME:
515 		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
516 		apm_resume(sc, event_code, event_info);
517 		break;
518 
519 	case APM_SYS_STANDBY_RESUME:
520 		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
521 		apm_resume(sc, event_code, event_info);
522 		break;
523 
524 	case APM_UPDATE_TIME:
525 		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
526 		apm_resume(sc, event_code, event_info);
527 		break;
528 
529 	case APM_CRIT_SUSPEND_REQ:
530 		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
531 		apm_record_event(sc, event_code);
532 		apm_suspend(sc);
533 		break;
534 
535 	case APM_BATTERY_LOW:
536 		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
537 		apm_battlow++;
538 		apm_record_event(sc, event_code);
539 		break;
540 
541 	case APM_CAP_CHANGE:
542 		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
543 		if (apm_minver < 2) {
544 			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
545 		} else {
546 			u_int numbatts, capflags;
547 			sc->ops->get_capabilities(sc->cookie,
548 						  &numbatts, &capflags);
549 			sc->ops->get_powstat(sc->cookie, &pi); /* XXX */
550 		}
551 		break;
552 
553 	default:
554 		switch (event_code >> 8) {
555 			case 0:
556 				code = "reserved system";
557 				break;
558 			case 1:
559 				code = "reserved device";
560 				break;
561 			case 2:
562 				code = "OEM defined";
563 				break;
564 			default:
565 				code = "reserved";
566 				break;
567 		}
568 		printf("APM: %s event code %x\n", code, event_code);
569 	}
570 }
571 
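/*
 * Tell the BIOS that any outstanding request is still being worked on,
 * drain and dispatch the queued BIOS events, then carry out whatever
 * suspend or standby request the event handlers recorded.
 */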
572 static void
573 apm_periodic_check(struct apm_softc *sc)
574 {
575 	int error;
576 	u_int event_code, event_info;
577 
578 
579 	/*
580 	 * tell the BIOS we're working on it, if asked to do a
581 	 * suspend/standby
582 	 */
583 	if (apm_op_inprog)
584 		sc->ops->set_powstate(sc->cookie, APM_DEV_ALLDEVS,
585 				      APM_LASTREQ_INPROG);
586 
587 	while ((error = sc->ops->get_event(sc->cookie, &event_code,
588 					   &event_info)) == 0
589 	       && !apm_damn_fool_bios)
590 		apm_event_handle(sc, event_code, event_info);
591 
592 	if (error != APM_ERR_NOEVENTS)
593 		apm_perror("get event", error);
594 	if (apm_suspends) {
595 		apm_op_inprog = 0;
596 		apm_suspend(sc);
597 	} else if (apm_standbys || apm_userstandbys) {
598 		apm_op_inprog = 0;
599 		apm_standby(sc);
600 	}
601 	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
602 	apm_damn_fool_bios = 0;
603 }
604 
605 static void
606 apm_set_ver(struct apm_softc *self, u_long detail)
607 {
608 
609 	if (apm_v12_enabled &&
610 	    APM_MAJOR_VERS(detail) == 1 &&
611 	    APM_MINOR_VERS(detail) == 2) {
612 		apm_majver = 1;
613 		apm_minver = 2;
614 		goto ok;
615 	}
616 
617 	if (apm_v11_enabled &&
618 	    APM_MAJOR_VERS(detail) == 1 &&
619 	    APM_MINOR_VERS(detail) == 1) {
620 		apm_majver = 1;
621 		apm_minver = 1;
622 	} else {
623 		apm_majver = 1;
624 		apm_minver = 0;
625 	}
626 ok:
627 	printf("Power Management spec V%d.%d", apm_majver, apm_minver);
628 	apm_inited = 1;
629 	if (detail & APM_IDLE_SLOWS) {
630 #ifdef DIAGNOSTIC
631 		/* not relevant often */
632 		printf(" (slowidle)");
633 #endif
634 		/* leave apm_do_idle at its user-configured setting */
635 	} else
636 		apm_do_idle = 0;
637 #ifdef DIAGNOSTIC
638 	if (detail & APM_BIOS_PM_DISABLED)
639 		printf(" (BIOS mgmt disabled)");
640 	if (detail & APM_BIOS_PM_DISENGAGED)
641 		printf(" (BIOS managing devices)");
642 #endif
643 }
644 
645 static int
646 apmmatch(struct device *parent,
647 	 struct cfdata *match, void *aux)
648 {
649 
650 	/* There can be only one! */
651 	if (apm_inited)
652 		return 0;
653 
654 	return (1);
655 }
656 
657 static void
658 apmattach(struct device *parent, struct device *self, void *aux)
659 {
660 	struct apm_softc *sc = (void *)self;
661 	struct apmdev_attach_args *aaa = aux;
662 	struct apm_power_info pinfo;
663 	u_int numbatts, capflags;
664 	int error;
665 
666 	printf(": ");
667 
668 	sc->ops = aaa->accessops;
669 	sc->cookie = aaa->accesscookie;
670 
671 	switch ((APM_MAJOR_VERS(aaa->apm_detail) << 8) +
672 		APM_MINOR_VERS(aaa->apm_detail)) {
673 	case 0x0100:
674 		apm_v11_enabled = 0;
675 		apm_v12_enabled = 0;
676 		break;
677 	case 0x0101:
678 		apm_v12_enabled = 0;
679 		/* fall through */
680 	case 0x0102:
681 	default:
682 		break;
683 	}
684 
685 	apm_set_ver(sc, aaa->apm_detail);	/* prints version info */
686 	printf("\n");
687 	if (apm_minver >= 2)
688 		sc->ops->get_capabilities(sc->cookie, &numbatts, &capflags);
689 
690 	/*
691 	 * enable power management
692 	 */
693 	sc->ops->enable(sc->cookie, 1);
694 
695 	error = sc->ops->get_powstat(sc->cookie, &pinfo);
696 	if (error == 0) {
697 #ifdef APM_POWER_PRINT
698 		apm_power_print(sc, &pinfo);
699 #endif
700 	} else
701 		apm_perror("get power status", error);
702 	sc->ops->cpu_busy(sc->cookie);
703 
704 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
705 
706 	/* Initial state is `resumed'. */
707 	sc->sc_power_state = PWR_RESUME;
708 
709 	/* Do an initial check. */
710 	apm_periodic_check(sc);
711 
712 	/*
713 	 * Create a kernel thread to periodically check for APM events,
714 	 * and notify other subsystems when they occur.
715 	 */
716 	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
717 	    &sc->sc_thread, "%s", sc->sc_dev.dv_xname) != 0) {
718 		/*
719 		 * We were unable to create the APM thread; bail out.
720 		 */
721 		sc->ops->disconnect(sc->cookie);
722 		printf("%s: unable to create thread, "
723 		    "kernel APM support disabled\n",
724 		       sc->sc_dev.dv_xname);
725 	}
726 }
727 
728 /*
729  * Print function (for parent devices).
730  */
731 int
732 apmprint(void *aux, const char *pnp)
733 {
734 	if (pnp)
735 		aprint_normal("apm at %s", pnp);
736 
737 	return (UNCONF);
738 }
739 static void
740 apm_thread(void *arg)
741 {
742 	struct apm_softc *apmsc = arg;
743 
744 	/*
745 	 * Loop forever, doing a periodic check for APM events.
746 	 */
747 	for (;;) {
748 		APM_LOCK(apmsc);
749 		apm_periodic_check(apmsc);
750 		APM_UNLOCK(apmsc);
751 		(void) tsleep(apmsc, PWAIT, "apmev",  (8 * hz) / 7);	/* roughly once a second */
752 	}
753 }
754 
755 int
756 apmdevopen(dev_t dev, int flag, int mode, struct lwp *l)
757 {
758 	int unit = APMUNIT(dev);
759 	int ctl = APMDEV(dev);
760 	int error = 0;
761 	struct apm_softc *sc;
762 
763 	if (unit >= apmdev_cd.cd_ndevs)
764 		return ENXIO;
765 	sc = apmdev_cd.cd_devs[unit];
766 	if (!sc)
767 		return ENXIO;
768 
769 	if (!apm_inited)
770 		return ENXIO;
771 
772 	DPRINTF(APMDEBUG_DEVICE,
773 	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));
774 
775 	APM_LOCK(sc);
776 	switch (ctl) {
777 	case APMDEV_CTL:
778 		if (!(flag & FWRITE)) {
779 			error = EINVAL;
780 			break;
781 		}
782 		if (sc->sc_flags & SCFLAG_OWRITE) {
783 			error = EBUSY;
784 			break;
785 		}
786 		sc->sc_flags |= SCFLAG_OWRITE;
787 		break;
788 	case APMDEV_NORMAL:
789 		if (!(flag & FREAD) || (flag & FWRITE)) {
790 			error = EINVAL;
791 			break;
792 		}
793 		sc->sc_flags |= SCFLAG_OREAD;
794 		break;
795 	default:
796 		error = ENXIO;
797 		break;
798 	}
799 	APM_UNLOCK(sc);
800 
801 	return (error);
802 }
803 
804 int
805 apmdevclose(dev_t dev, int flag, int mode,
806 	    struct lwp *l)
807 {
808 	struct apm_softc *sc = apmdev_cd.cd_devs[APMUNIT(dev)];
809 	int ctl = APMDEV(dev);
810 
811 	DPRINTF(APMDEBUG_DEVICE,
812 	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));
813 
814 	APM_LOCK(sc);
815 	switch (ctl) {
816 	case APMDEV_CTL:
817 		sc->sc_flags &= ~SCFLAG_OWRITE;
818 		break;
819 	case APMDEV_NORMAL:
820 		sc->sc_flags &= ~SCFLAG_OREAD;
821 		break;
822 	}
823 	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
824 		sc->event_count = 0;
825 		sc->event_ptr = 0;
826 	}
827 	APM_UNLOCK(sc);
828 	return 0;
829 }
830 
831 int
832 apmdevioctl(dev_t dev, u_long cmd, void *data, int flag,
833 	    struct lwp *l)
834 {
835 	struct apm_softc *sc = apmdev_cd.cd_devs[APMUNIT(dev)];
836 	struct apm_power_info *powerp;
837 	struct apm_event_info *evp;
838 #if 0
839 	struct apm_ctl *actl;
840 #endif
841 	int i, error = 0;
842 	int batt_flags;
843 
844 	APM_LOCK(sc);
845 	switch (cmd) {
846 	case APM_IOC_STANDBY:
847 		if (!apm_do_standby) {
848 			error = EOPNOTSUPP;
849 			break;
850 		}
851 
852 		if ((flag & FWRITE) == 0) {
853 			error = EBADF;
854 			break;
855 		}
856 		apm_userstandbys++;
857 		break;
858 
859 	case APM_IOC_SUSPEND:
860 		if ((flag & FWRITE) == 0) {
861 			error = EBADF;
862 			break;
863 		}
864 		apm_suspends++;
865 		break;
866 
867 	case APM_IOC_NEXTEVENT:
868 		if (!sc->event_count)
869 			error = EAGAIN;
870 		else {
871 			evp = (struct apm_event_info *)data;
872 			i = sc->event_ptr + APM_NEVENTS - sc->event_count;
873 			i %= APM_NEVENTS;
874 			*evp = sc->event_list[i];
875 			sc->event_count--;
876 		}
877 		break;
878 
879 	case OAPM_IOC_GETPOWER:
880 	case APM_IOC_GETPOWER:
881 		powerp = (struct apm_power_info *)data;
882 		if ((error = sc->ops->get_powstat(sc->cookie, powerp)) != 0) {
883 			apm_perror("ioctl get power status", error);
884 			error = EIO;
885 			break;
886 		}
887 		switch (apm_minver) {
888 		case 0:
889 			break;
890 		case 1:
891 		default:
892 			batt_flags = powerp->battery_state;
893 			powerp->battery_state = APM_BATT_UNKNOWN;
894 			if (batt_flags & APM_BATT_FLAG_HIGH)
895 				powerp->battery_state = APM_BATT_HIGH;
896 			else if (batt_flags & APM_BATT_FLAG_LOW)
897 				powerp->battery_state = APM_BATT_LOW;
898 			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
899 				powerp->battery_state = APM_BATT_CRITICAL;
900 			else if (batt_flags & APM_BATT_FLAG_CHARGING)
901 				powerp->battery_state = APM_BATT_CHARGING;
902 			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
903 				powerp->battery_state = APM_BATT_ABSENT;
904 			break;
905 		}
906 		break;
907 
908 	default:
909 		error = ENOTTY;
910 	}
911 	APM_UNLOCK(sc);
912 
913 	return (error);
914 }
915 
916 int
917 apmdevpoll(dev_t dev, int events, struct lwp *l)
918 {
919 	struct apm_softc *sc = apmdev_cd.cd_devs[APMUNIT(dev)];
920 	int revents = 0;
921 
922 	APM_LOCK(sc);
923 	if (events & (POLLIN | POLLRDNORM)) {
924 		if (sc->event_count)
925 			revents |= events & (POLLIN | POLLRDNORM);
926 		else
927 			selrecord(l, &sc->sc_rsel);
928 	}
929 	APM_UNLOCK(sc);
930 
931 	return (revents);
932 }
933 
934 static void
935 filt_apmrdetach(struct knote *kn)
936 {
937 	struct apm_softc *sc = kn->kn_hook;
938 
939 	APM_LOCK(sc);
940 	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
941 	APM_UNLOCK(sc);
942 }
943 
944 static int
945 filt_apmread(struct knote *kn, long hint)
946 {
947 	struct apm_softc *sc = kn->kn_hook;
948 
949 	kn->kn_data = sc->event_count;
950 	return (kn->kn_data > 0);
951 }
952 
953 static const struct filterops apmread_filtops =
954 	{ 1, NULL, filt_apmrdetach, filt_apmread };
955 
956 int
957 apmdevkqfilter(dev_t dev, struct knote *kn)
958 {
959 	struct apm_softc *sc = apmdev_cd.cd_devs[APMUNIT(dev)];
960 	struct klist *klist;
961 
962 	switch (kn->kn_filter) {
963 	case EVFILT_READ:
964 		klist = &sc->sc_rsel.sel_klist;
965 		kn->kn_fop = &apmread_filtops;
966 		break;
967 
968 	default:
969 		return (1);
970 	}
971 
972 	kn->kn_hook = sc;
973 
974 	APM_LOCK(sc);
975 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
976 	APM_UNLOCK(sc);
977 
978 	return (0);
979 }
980