xref: /netbsd-src/sys/dev/hpc/apm/apmdev.c (revision 466a16a118933bd295a8a104f095714fadf9cf68)
1 /*	$NetBSD: apmdev.c,v 1.19 2008/06/12 01:46:32 rafal Exp $ */
2 
3 /*-
4  * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by John Kohl and Christopher G. Demetriou.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 /*
32  * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
33  */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: apmdev.c,v 1.19 2008/06/12 01:46:32 rafal Exp $");
37 
38 #ifdef _KERNEL_OPT
39 #include "opt_apmdev.h"
40 #endif
41 
42 #ifdef APM_NOIDLE
43 #error APM_NOIDLE option deprecated; use APM_NO_IDLE instead
44 #endif
45 
46 #if defined(DEBUG) && !defined(APMDEBUG)
47 #define	APMDEBUG
48 #endif
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/signalvar.h>
53 #include <sys/kernel.h>
54 #include <sys/proc.h>
55 #include <sys/kthread.h>
56 #include <sys/mutex.h>
57 #include <sys/user.h>
58 #include <sys/malloc.h>
59 #include <sys/device.h>
60 #include <sys/fcntl.h>
61 #include <sys/ioctl.h>
62 #include <sys/select.h>
63 #include <sys/poll.h>
64 #include <sys/conf.h>
65 
66 #include <dev/hpc/apm/apmvar.h>
67 
68 #include <machine/stdarg.h>
69 
70 #if defined(APMDEBUG)
71 #define	DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)
72 
73 #define	APMDEBUG_INFO		0x01
74 #define	APMDEBUG_APMCALLS	0x02
75 #define	APMDEBUG_EVENTS		0x04
76 #define	APMDEBUG_PROBE		0x10
77 #define	APMDEBUG_ATTACH		0x40
78 #define	APMDEBUG_DEVICE		0x20
79 #define	APMDEBUG_ANOM		0x40
80 
81 #ifdef APMDEBUG_VALUE
82 int	apmdebug = APMDEBUG_VALUE;
83 #else
84 int	apmdebug = 0;
85 #endif
86 #else
87 #define	DPRINTF(f, x)		/**/
88 #endif
89 
90 #define APM_NEVENTS 16
91 
92 struct apm_softc {
93 	struct device sc_dev;
94 	struct selinfo sc_rsel;
95 	struct selinfo sc_xsel;
96 	int	sc_flags;
97 	int	event_count;
98 	int	event_ptr;
99 	int	sc_power_state;
100 	lwp_t	*sc_thread;
101 	kmutex_t sc_mutex;
102 	struct apm_event_info event_list[APM_NEVENTS];
103 	struct apm_accessops *ops;
104 	void *cookie;
105 };
106 #define	SCFLAG_OREAD	0x0000001
107 #define	SCFLAG_OWRITE	0x0000002
108 #define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)
109 
110 #define	APMUNIT(dev)	(minor(dev)&0xf0)
111 #define	APMDEV(dev)	(minor(dev)&0x0f)
112 #define APMDEV_NORMAL	0
113 #define APMDEV_CTL	8
114 
115 /*
116  * A brief note on the locking protocol: it's very simple; we
117  * assert an exclusive lock any time thread context enters the
118  * APM module.  This covers both the APM thread itself and
119  * user context.
120  */
121 #define	APM_LOCK(apmsc)		mutex_enter(&(apmsc)->sc_mutex)
122 #define	APM_UNLOCK(apmsc)	mutex_exit(&(apmsc)->sc_mutex)
123 
124 static void	apmattach(struct device *, struct device *, void *);
125 static int	apmmatch(struct device *, struct cfdata *, void *);
126 
127 static void	apm_event_handle(struct apm_softc *, u_int, u_int);
128 static void	apm_periodic_check(struct apm_softc *);
129 static void	apm_thread(void *);
130 static void	apm_perror(const char *, int, ...)
131 		    __attribute__((__format__(__printf__,1,3)));
132 #ifdef APM_POWER_PRINT
133 static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
134 #endif
135 static int	apm_record_event(struct apm_softc *, u_int);
136 static void	apm_set_ver(struct apm_softc *, u_long);
137 static void	apm_standby(struct apm_softc *);
138 static const char *apm_strerror(int);
139 static void	apm_suspend(struct apm_softc *);
140 static void	apm_resume(struct apm_softc *, u_int, u_int);
141 
142 CFATTACH_DECL(apmdev, sizeof(struct apm_softc),
143     apmmatch, apmattach, NULL, NULL);
144 
145 extern struct cfdriver apmdev_cd;
146 
147 dev_type_open(apmdevopen);
148 dev_type_close(apmdevclose);
149 dev_type_ioctl(apmdevioctl);
150 dev_type_poll(apmdevpoll);
151 dev_type_kqfilter(apmdevkqfilter);
152 
153 const struct cdevsw apmdev_cdevsw = {
154 	apmdevopen, apmdevclose, noread, nowrite, apmdevioctl,
155 	nostop, notty, apmdevpoll, nommap, apmdevkqfilter, D_OTHER
156 };
157 
158 /* configurable variables */
159 int	apm_bogus_bios = 0;
160 #ifdef APM_DISABLE
161 int	apm_enabled = 0;
162 #else
163 int	apm_enabled = 1;
164 #endif
165 #ifdef APM_NO_IDLE
166 int	apm_do_idle = 0;
167 #else
168 int	apm_do_idle = 1;
169 #endif
170 #ifdef APM_NO_STANDBY
171 int	apm_do_standby = 0;
172 #else
173 int	apm_do_standby = 1;
174 #endif
175 #ifdef APM_V10_ONLY
176 int	apm_v11_enabled = 0;
177 #else
178 int	apm_v11_enabled = 1;
179 #endif
180 #ifdef APM_NO_V12
181 int	apm_v12_enabled = 0;
182 #else
183 int	apm_v12_enabled = 1;
184 #endif
185 
186 /* variables used during operation (XXX cgd) */
187 u_char	apm_majver, apm_minver;
188 int	apm_inited;
189 int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
190 int	apm_damn_fool_bios, apm_op_inprog;
191 int	apm_evindex;
192 
193 static int apm_spl;		/* saved spl while suspended */
194 
195 static const char *
196 apm_strerror(int code)
197 {
198 	switch (code) {
199 	case APM_ERR_PM_DISABLED:
200 		return ("power management disabled");
201 	case APM_ERR_REALALREADY:
202 		return ("real mode interface already connected");
203 	case APM_ERR_NOTCONN:
204 		return ("interface not connected");
205 	case APM_ERR_16ALREADY:
206 		return ("16-bit interface already connected");
207 	case APM_ERR_16NOTSUPP:
208 		return ("16-bit interface not supported");
209 	case APM_ERR_32ALREADY:
210 		return ("32-bit interface already connected");
211 	case APM_ERR_32NOTSUPP:
212 		return ("32-bit interface not supported");
213 	case APM_ERR_UNRECOG_DEV:
214 		return ("unrecognized device ID");
215 	case APM_ERR_ERANGE:
216 		return ("parameter out of range");
217 	case APM_ERR_NOTENGAGED:
218 		return ("interface not engaged");
219 	case APM_ERR_UNABLE:
220 		return ("unable to enter requested state");
221 	case APM_ERR_NOEVENTS:
222 		return ("no pending events");
223 	case APM_ERR_NOT_PRESENT:
224 		return ("no APM present");
225 	default:
226 		return ("unknown error code");
227 	}
228 }
229 
230 static void
231 apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
232 {
233 	va_list ap;
234 
235 	printf("APM ");
236 
237 	va_start(ap, errinfo);
238 	vprintf(str, ap);			/* XXX cgd */
239 	va_end(ap);
240 
241 	printf(": %s\n", apm_strerror(errinfo));
242 }
243 
244 #ifdef APM_POWER_PRINT
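/*
 * Print the current power status (battery life, A/C state and battery
 * charge state) on the console.  Only compiled in when the kernel is
 * built with options APM_POWER_PRINT.
 */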
245 static void
246 apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
247 {
248 
249 	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
250 		printf("%s: battery life expectancy: %d%%\n",
251 		    device_xname(&sc->sc_dev), pi->battery_life);
252 	}
253 	printf("%s: A/C state: ", device_xname(&sc->sc_dev));
254 	switch (pi->ac_state) {
255 	case APM_AC_OFF:
256 		printf("off\n");
257 		break;
258 	case APM_AC_ON:
259 		printf("on\n");
260 		break;
261 	case APM_AC_BACKUP:
262 		printf("backup power\n");
263 		break;
264 	default:
265 	case APM_AC_UNKNOWN:
266 		printf("unknown\n");
267 		break;
268 	}
269 	if (apm_majver == 1 && apm_minver == 0) {
270 		printf("%s: battery charge state:", device_xname(&sc->sc_dev));
271 		switch (pi->battery_state) {
272 		case APM_BATT_HIGH:
273 			printf("high\n");
274 			break;
275 		case APM_BATT_LOW:
276 			printf("low\n");
277 			break;
278 		case APM_BATT_CRITICAL:
279 			printf("critical\n");
280 			break;
281 		case APM_BATT_CHARGING:
282 			printf("charging\n");
283 			break;
284 		case APM_BATT_UNKNOWN:
285 			printf("unknown\n");
286 			break;
287 		default:
288 			printf("undecoded state %x\n", pi->battery_state);
289 			break;
290 		}
291 	} else {
292 		if (pi->battery_state&APM_BATT_FLAG_CHARGING)
293 			printf("charging ");
295 		if (pi->battery_state&APM_BATT_FLAG_UNKNOWN)
296 			printf("unknown\n");
297 		else if (pi->battery_state&APM_BATT_FLAG_CRITICAL)
298 			printf("critical\n");
299 		else if (pi->battery_state&APM_BATT_FLAG_LOW)
300 			printf("low\n");
301 		else if (pi->battery_state&APM_BATT_FLAG_HIGH)
302 			printf("high\n");
303 	}
304 	if (pi->minutes_left != 0) {
305 		printf("%s: estimated ", device_xname(&sc->sc_dev));
306 		printf("%dh ", pi->minutes_left / 60);
307 	}
308 	return;
309 }
310 #endif
311 
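/*
 * Suspend the system: run the soft-suspend power hooks, raise the SPL,
 * run the suspend hooks, and then ask the APM backend to put all
 * devices into the suspend state.  No-op if already suspended.
 */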
312 static void
313 apm_suspend(struct apm_softc *sc)
314 {
315 
316 	if (sc->sc_power_state == PWR_SUSPEND) {
317 #ifdef APMDEBUG
318 		printf("%s: apm_suspend: already suspended?\n",
319 		    device_xname(&sc->sc_dev));
320 #endif
321 		return;
322 	}
323 	sc->sc_power_state = PWR_SUSPEND;
324 
325 	dopowerhooks(PWR_SOFTSUSPEND);
326 	(void) tsleep(sc, PWAIT, "apmsuspend",  hz/2);
327 
328 	apm_spl = splhigh();
329 
330 	dopowerhooks(PWR_SUSPEND);
331 
332 	/* XXX cgd */
333 	(void)sc->ops->set_powstate(sc->cookie, APM_DEV_ALLDEVS, APM_SYS_SUSPEND);
334 }
335 
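/*
 * Enter standby: as apm_suspend(), but requests the lighter-weight
 * standby state from the APM backend.
 */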
336 static void
337 apm_standby(struct apm_softc *sc)
338 {
339 
340 	if (sc->sc_power_state == PWR_STANDBY) {
341 #ifdef APMDEBUG
342 		printf("%s: apm_standby: already standing by?\n",
343 		    device_xname(&sc->sc_dev));
344 #endif
345 		return;
346 	}
347 	sc->sc_power_state = PWR_STANDBY;
348 
349 	dopowerhooks(PWR_SOFTSTANDBY);
350 	(void) tsleep(sc, PWAIT, "apmstandby",  hz/2);
351 
352 	apm_spl = splhigh();
353 
354 	dopowerhooks(PWR_STANDBY);
355 	/* XXX cgd */
356 	(void)sc->ops->set_powstate(sc->cookie, APM_DEV_ALLDEVS, APM_SYS_STANDBY);
357 }
358 
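/*
 * Resume from suspend or standby: reload the time-of-day clock, run the
 * resume power hooks, restore the SPL saved at suspend time and record
 * the resume event for userland.
 */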
359 static void
360 apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
361 {
362 
363 	if (sc->sc_power_state == PWR_RESUME) {
364 #ifdef APMDEBUG
365 		printf("%s: apm_resume: already running?\n",
366 		    device_xname(&sc->sc_dev));
367 #endif
368 		return;
369 	}
370 	sc->sc_power_state = PWR_RESUME;
371 
372 	/*
373 	 * Some systems require the clock to be reinitialized after hibernation.
374 	 */
375 /* XXX
376 	initrtclock();
377 */
378 
379 	inittodr(time_second);
380 	dopowerhooks(PWR_RESUME);
381 
382 	splx(apm_spl);
383 
384 	dopowerhooks(PWR_SOFTRESUME);
385 
386 	apm_record_event(sc, event_type);
387 }
388 
389 /*
390  * return 0 if the user will notice and handle the event,
391  * return 1 if the kernel driver should do so.
392  */
393 static int
394 apm_record_event(struct apm_softc *sc, u_int event_type)
395 {
396 	struct apm_event_info *evp;
397 
398 	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
399 		return 1;		/* no user waiting */
400 	if (sc->event_count == APM_NEVENTS)
401 		return 1;			/* overflow */
402 	evp = &sc->event_list[sc->event_ptr];
403 	sc->event_count++;
404 	sc->event_ptr++;
405 	sc->event_ptr %= APM_NEVENTS;
406 	evp->type = event_type;
407 	evp->index = ++apm_evindex;
408 	selnotify(&sc->sc_rsel, 0, 0);
409 	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
410 }
411 
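/*
 * Handle one event reported by the APM backend: queue it for a userland
 * power-management daemon via apm_record_event() and/or act on it
 * directly in the kernel.
 */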
412 static void
413 apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
414 {
415 	int error;
416 	const char *code;
417 	struct apm_power_info pi;
418 
419 	switch (event_code) {
420 	case APM_USER_STANDBY_REQ:
421 		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
422 		if (apm_do_standby) {
423 			if (apm_record_event(sc, event_code))
424 				apm_userstandbys++;
425 			apm_op_inprog++;
426 			(void)sc->ops->set_powstate(sc->cookie,
427 						    APM_DEV_ALLDEVS,
428 						    APM_LASTREQ_INPROG);
429 		} else {
430 			(void)sc->ops->set_powstate(sc->cookie,
431 						    APM_DEV_ALLDEVS,
432 						    APM_LASTREQ_REJECTED);
433 			/* in case BIOS hates being spurned */
434 			sc->ops->enable(sc->cookie, 1);
435 		}
436 		break;
437 
438 	case APM_STANDBY_REQ:
439 		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
440 		if (apm_standbys || apm_suspends) {
441 			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
442 			    ("damn fool BIOS did not wait for answer\n"));
443 			/* just give up the fight */
444 			apm_damn_fool_bios = 1;
445 		}
446 		if (apm_do_standby) {
447 			if (apm_record_event(sc, event_code))
448 				apm_standbys++;
449 			apm_op_inprog++;
450 			(void)sc->ops->set_powstate(sc->cookie,
451 						    APM_DEV_ALLDEVS,
452 						    APM_LASTREQ_INPROG);
453 		} else {
454 			(void)sc->ops->set_powstate(sc->cookie,
455 						    APM_DEV_ALLDEVS,
456 						    APM_LASTREQ_REJECTED);
457 			/* in case BIOS hates being spurned */
458 			sc->ops->enable(sc->cookie, 1);
459 		}
460 		break;
461 
462 	case APM_USER_SUSPEND_REQ:
463 		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
464 		if (apm_record_event(sc, event_code))
465 			apm_suspends++;
466 		apm_op_inprog++;
467 		(void)sc->ops->set_powstate(sc->cookie,
468 					    APM_DEV_ALLDEVS,
469 					    APM_LASTREQ_INPROG);
470 		break;
471 
472 	case APM_SUSPEND_REQ:
473 		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
474 		if (apm_standbys || apm_suspends) {
475 			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
476 			    ("damn fool BIOS did not wait for answer\n"));
477 			/* just give up the fight */
478 			apm_damn_fool_bios = 1;
479 		}
480 		if (apm_record_event(sc, event_code))
481 			apm_suspends++;
482 		apm_op_inprog++;
483 		(void)sc->ops->set_powstate(sc->cookie,
484 					    APM_DEV_ALLDEVS,
485 					    APM_LASTREQ_INPROG);
486 		break;
487 
488 	case APM_POWER_CHANGE:
489 		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
490 		error = sc->ops->get_powstat(sc->cookie, &pi);
491 #ifdef APM_POWER_PRINT
492 		/* only print if nobody is catching events. */
493 		if (error == 0 &&
494 		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
495 			apm_power_print(sc, &pi);
496 #endif
497 		apm_record_event(sc, event_code);
498 		break;
499 
500 	case APM_NORMAL_RESUME:
501 		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
502 		apm_resume(sc, event_code, event_info);
503 		break;
504 
505 	case APM_CRIT_RESUME:
506 		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
507 		apm_resume(sc, event_code, event_info);
508 		break;
509 
510 	case APM_SYS_STANDBY_RESUME:
511 		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
512 		apm_resume(sc, event_code, event_info);
513 		break;
514 
515 	case APM_UPDATE_TIME:
516 		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
517 		apm_resume(sc, event_code, event_info);
518 		break;
519 
520 	case APM_CRIT_SUSPEND_REQ:
521 		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
522 		apm_record_event(sc, event_code);
523 		apm_suspend(sc);
524 		break;
525 
526 	case APM_BATTERY_LOW:
527 		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
528 		apm_battlow++;
529 		apm_record_event(sc, event_code);
530 		break;
531 
532 	case APM_CAP_CHANGE:
533 		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
534 		if (apm_minver < 2) {
535 			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
536 		} else {
537 			u_int numbatts, capflags;
538 			sc->ops->get_capabilities(sc->cookie,
539 						  &numbatts, &capflags);
540 			sc->ops->get_powstat(sc->cookie, &pi); /* XXX */
541 		}
542 		break;
543 
544 	default:
545 		switch (event_code >> 8) {
546 			case 0:
547 				code = "reserved system";
548 				break;
549 			case 1:
550 				code = "reserved device";
551 				break;
552 			case 2:
553 				code = "OEM defined";
554 				break;
555 			default:
556 				code = "reserved";
557 				break;
558 		}
559 		printf("APM: %s event code %x\n", code, event_code);
560 	}
561 }
562 
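/*
 * Drain pending events from the APM backend and then carry out any
 * suspend or standby that was requested.  Called from the APM kernel
 * thread and once at attach time.
 */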
563 static void
564 apm_periodic_check(struct apm_softc *sc)
565 {
566 	int error;
567 	u_int event_code, event_info;
568 
569 
570 	/*
571 	 * tell the BIOS we're working on it, if asked to do a
572 	 * suspend/standby
573 	 */
574 	if (apm_op_inprog)
575 		sc->ops->set_powstate(sc->cookie, APM_DEV_ALLDEVS,
576 				      APM_LASTREQ_INPROG);
577 
578 	while ((error = sc->ops->get_event(sc->cookie, &event_code,
579 					   &event_info)) == 0
580 	       && !apm_damn_fool_bios)
581 		apm_event_handle(sc, event_code, event_info);
582 
583 	if (error != APM_ERR_NOEVENTS)
584 		apm_perror("get event", error);
585 	if (apm_suspends) {
586 		apm_op_inprog = 0;
587 		apm_suspend(sc);
588 	} else if (apm_standbys || apm_userstandbys) {
589 		apm_op_inprog = 0;
590 		apm_standby(sc);
591 	}
592 	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
593 	apm_damn_fool_bios = 0;
594 }
595 
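/*
 * Pick the APM protocol version to run, based on what the BIOS reports
 * and on the apm_v11_enabled/apm_v12_enabled settings, and announce it
 * on the console.
 */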
596 static void
597 apm_set_ver(struct apm_softc *self, u_long detail)
598 {
599 
600 	if (apm_v12_enabled &&
601 	    APM_MAJOR_VERS(detail) == 1 &&
602 	    APM_MINOR_VERS(detail) == 2) {
603 		apm_majver = 1;
604 		apm_minver = 2;
605 		goto ok;
606 	}
607 
608 	if (apm_v11_enabled &&
609 	    APM_MAJOR_VERS(detail) == 1 &&
610 	    APM_MINOR_VERS(detail) == 1) {
611 		apm_majver = 1;
612 		apm_minver = 1;
613 	} else {
614 		apm_majver = 1;
615 		apm_minver = 0;
616 	}
617 ok:
618 	printf("Power Management spec V%d.%d", apm_majver, apm_minver);
619 	apm_inited = 1;
620 	if (detail & APM_IDLE_SLOWS) {
621 #ifdef DIAGNOSTIC
622 		/* not relevant often */
623 		printf(" (slowidle)");
624 #endif
625 		/* leave apm_do_idle at its user-configured setting */
626 	} else
627 		apm_do_idle = 0;
628 #ifdef DIAGNOSTIC
629 	if (detail & APM_BIOS_PM_DISABLED)
630 		printf(" (BIOS mgmt disabled)");
631 	if (detail & APM_BIOS_PM_DISENGAGED)
632 		printf(" (BIOS managing devices)");
633 #endif
634 }
635 
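/*
 * Autoconfiguration match: at most one apmdev instance may attach.
 */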
636 static int
637 apmmatch(struct device *parent,
638 	 struct cfdata *match, void *aux)
639 {
640 
641 	/* There can be only one! */
642 	if (apm_inited)
643 		return 0;
644 
645 	return (1);
646 }
647 
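/*
 * Attach: wire up the backend access ops, negotiate the APM version,
 * enable power management, query the initial power status and start
 * the event-polling kernel thread.
 */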
648 static void
649 apmattach(struct device *parent, struct device *self, void *aux)
650 {
651 	struct apm_softc *sc = (void *)self;
652 	struct apmdev_attach_args *aaa = aux;
653 	struct apm_power_info pinfo;
654 	u_int numbatts, capflags;
655 	int error;
656 
657 	printf(": ");
658 
659 	sc->ops = aaa->accessops;
660 	sc->cookie = aaa->accesscookie;
661 
662 	switch ((APM_MAJOR_VERS(aaa->apm_detail) << 8) +
663 		APM_MINOR_VERS(aaa->apm_detail)) {
664 	case 0x0100:
665 		apm_v11_enabled = 0;
666 		apm_v12_enabled = 0;
667 		break;
668 	case 0x0101:
669 		apm_v12_enabled = 0;
670 		/* fall through */
671 	case 0x0102:
672 	default:
673 		break;
674 	}
675 
676 	apm_set_ver(sc, aaa->apm_detail);	/* prints version info */
677 	printf("\n");
678 	if (apm_minver >= 2)
679 		sc->ops->get_capabilities(sc->cookie, &numbatts, &capflags);
680 
681 	/*
682 	 * enable power management
683 	 */
684 	sc->ops->enable(sc->cookie, 1);
685 
686 	error = sc->ops->get_powstat(sc->cookie, &pinfo);
687 	if (error == 0) {
688 #ifdef APM_POWER_PRINT
689 		apm_power_print(sc, &pinfo);
690 #endif
691 	} else
692 		apm_perror("get power status", error);
693 	sc->ops->cpu_busy(sc->cookie);
694 
695 	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_NONE);
696 	selinit(&sc->sc_rsel);
697 	selinit(&sc->sc_xsel);
698 
699 	/* Initial state is `resumed'. */
700 	sc->sc_power_state = PWR_RESUME;
701 
702 	/* Do an initial check. */
703 	apm_periodic_check(sc);
704 
705 	/*
706 	 * Create a kernel thread to periodically check for APM events,
707 	 * and notify other subsystems when they occur.
708 	 */
709 	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
710 	    &sc->sc_thread, "%s", device_xname(&sc->sc_dev)) != 0) {
711 		/*
712 		 * We were unable to create the APM thread; bail out.
713 		 */
714 		sc->ops->disconnect(sc->cookie);
715 		aprint_error_dev(&sc->sc_dev, "unable to create thread, "
716 		    "kernel APM support disabled\n");
717 	}
718 }
719 
720 /*
721  * Print function (for parent devices).
722  */
723 int
724 apmprint(void *aux, const char *pnp)
725 {
726 	if (pnp)
727 		aprint_normal("apm at %s", pnp);
728 
729 	return (UNCONF);
730 }
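
/*
 * Kernel thread: poll the APM backend for events roughly once a second,
 * holding the softc lock across each check.
 */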
731 void
732 apm_thread(void *arg)
733 {
734 	struct apm_softc *apmsc = arg;
735 
736 	/*
737 	 * Loop forever, doing a periodic check for APM events.
738 	 */
739 	for (;;) {
740 		APM_LOCK(apmsc);
741 		apm_periodic_check(apmsc);
742 		APM_UNLOCK(apmsc);
743 		(void) tsleep(apmsc, PWAIT, "apmev",  (8 * hz) / 7);
744 	}
745 }
746 
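/*
 * Open the APM device: the "normal" minor is read-only and is used to
 * watch events; the "ctl" minor is write-only and exclusive, and marks
 * a userland power-management daemon as present (see
 * apm_record_event()).
 */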
747 int
748 apmdevopen(dev_t dev, int flag, int mode, struct lwp *l)
749 {
750 	int ctl = APMDEV(dev);
751 	int error = 0;
752 	struct apm_softc *sc;
753 
754 	sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
755 	if (!sc)
756 		return ENXIO;
757 
758 	if (!apm_inited)
759 		return ENXIO;
760 
761 	DPRINTF(APMDEBUG_DEVICE,
762 	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));
763 
764 	APM_LOCK(sc);
765 	switch (ctl) {
766 	case APMDEV_CTL:
767 		if (!(flag & FWRITE)) {
768 			error = EINVAL;
769 			break;
770 		}
771 		if (sc->sc_flags & SCFLAG_OWRITE) {
772 			error = EBUSY;
773 			break;
774 		}
775 		sc->sc_flags |= SCFLAG_OWRITE;
776 		break;
777 	case APMDEV_NORMAL:
778 		if (!(flag & FREAD) || (flag & FWRITE)) {
779 			error = EINVAL;
780 			break;
781 		}
782 		sc->sc_flags |= SCFLAG_OREAD;
783 		break;
784 	default:
785 		error = ENXIO;
786 		break;
787 	}
788 	APM_UNLOCK(sc);
789 
790 	return (error);
791 }
792 
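/*
 * Close: clear the corresponding open flag and, once no one has the
 * device open any more, discard any queued events.
 */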
793 int
794 apmdevclose(dev_t dev, int flag, int mode,
795 	    struct lwp *l)
796 {
797 	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
798 	int ctl = APMDEV(dev);
799 
800 	DPRINTF(APMDEBUG_DEVICE,
801 	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));
802 
803 	APM_LOCK(sc);
804 	switch (ctl) {
805 	case APMDEV_CTL:
806 		sc->sc_flags &= ~SCFLAG_OWRITE;
807 		break;
808 	case APMDEV_NORMAL:
809 		sc->sc_flags &= ~SCFLAG_OREAD;
810 		break;
811 	}
812 	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
813 		sc->event_count = 0;
814 		sc->event_ptr = 0;
815 	}
816 	APM_UNLOCK(sc);
817 	return 0;
818 }
819 
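/*
 * ioctl handler: request standby/suspend, dequeue events and report
 * power status.  A userland consumer might look roughly like this
 * (sketch only; the device node name varies by port):
 *
 *	int fd = open("/dev/apm", O_RDONLY);
 *	struct apm_power_info info;
 *	if (fd >= 0 && ioctl(fd, APM_IOC_GETPOWER, &info) == 0)
 *		printf("battery life: %d%%\n", info.battery_life);
 */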
820 int
821 apmdevioctl(dev_t dev, u_long cmd, void *data, int flag,
822 	    struct lwp *l)
823 {
824 	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
825 	struct apm_power_info *powerp;
826 	struct apm_event_info *evp;
827 #if 0
828 	struct apm_ctl *actl;
829 #endif
830 	int i, error = 0;
831 	int batt_flags;
832 
833 	APM_LOCK(sc);
834 	switch (cmd) {
835 	case APM_IOC_STANDBY:
836 		if (!apm_do_standby) {
837 			error = EOPNOTSUPP;
838 			break;
839 		}
840 
841 		if ((flag & FWRITE) == 0) {
842 			error = EBADF;
843 			break;
844 		}
845 		apm_userstandbys++;
846 		break;
847 
848 	case APM_IOC_SUSPEND:
849 		if ((flag & FWRITE) == 0) {
850 			error = EBADF;
851 			break;
852 		}
853 		apm_suspends++;
854 		break;
855 
856 	case APM_IOC_NEXTEVENT:
857 		if (!sc->event_count)
858 			error = EAGAIN;
859 		else {
860 			evp = (struct apm_event_info *)data;
861 			i = sc->event_ptr + APM_NEVENTS - sc->event_count;
862 			i %= APM_NEVENTS;
863 			*evp = sc->event_list[i];
864 			sc->event_count--;
865 		}
866 		break;
867 
868 	case OAPM_IOC_GETPOWER:
869 	case APM_IOC_GETPOWER:
870 		powerp = (struct apm_power_info *)data;
871 		if ((error = sc->ops->get_powstat(sc->cookie, powerp)) != 0) {
872 			apm_perror("ioctl get power status", error);
873 			error = EIO;
874 			break;
875 		}
876 		switch (apm_minver) {
877 		case 0:
878 			break;
879 		case 1:
880 		default:
881 			batt_flags = powerp->battery_state;
882 			powerp->battery_state = APM_BATT_UNKNOWN;
883 			if (batt_flags & APM_BATT_FLAG_HIGH)
884 				powerp->battery_state = APM_BATT_HIGH;
885 			else if (batt_flags & APM_BATT_FLAG_LOW)
886 				powerp->battery_state = APM_BATT_LOW;
887 			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
888 				powerp->battery_state = APM_BATT_CRITICAL;
889 			else if (batt_flags & APM_BATT_FLAG_CHARGING)
890 				powerp->battery_state = APM_BATT_CHARGING;
891 			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
892 				powerp->battery_state = APM_BATT_ABSENT;
893 			break;
894 		}
895 		break;
896 
897 	default:
898 		error = ENOTTY;
899 	}
900 	APM_UNLOCK(sc);
901 
902 	return (error);
903 }
904 
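/*
 * poll(2) support: the device becomes readable when at least one event
 * is queued.
 */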
905 int
906 apmdevpoll(dev_t dev, int events, struct lwp *l)
907 {
908 	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
909 	int revents = 0;
910 
911 	APM_LOCK(sc);
912 	if (events & (POLLIN | POLLRDNORM)) {
913 		if (sc->event_count)
914 			revents |= events & (POLLIN | POLLRDNORM);
915 		else
916 			selrecord(l, &sc->sc_rsel);
917 	}
918 	APM_UNLOCK(sc);
919 
920 	return (revents);
921 }
922 
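/*
 * kqueue(2) support: an EVFILT_READ knote reports the number of queued
 * events.
 */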
923 static void
924 filt_apmrdetach(struct knote *kn)
925 {
926 	struct apm_softc *sc = kn->kn_hook;
927 
928 	APM_LOCK(sc);
929 	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
930 	APM_UNLOCK(sc);
931 }
932 
933 static int
934 filt_apmread(struct knote *kn, long hint)
935 {
936 	struct apm_softc *sc = kn->kn_hook;
937 
938 	kn->kn_data = sc->event_count;
939 	return (kn->kn_data > 0);
940 }
941 
942 static const struct filterops apmread_filtops =
943 	{ 1, NULL, filt_apmrdetach, filt_apmread };
944 
945 int
946 apmdevkqfilter(dev_t dev, struct knote *kn)
947 {
948 	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
949 	struct klist *klist;
950 
951 	switch (kn->kn_filter) {
952 	case EVFILT_READ:
953 		klist = &sc->sc_rsel.sel_klist;
954 		kn->kn_fop = &apmread_filtops;
955 		break;
956 
957 	default:
958 		return (EINVAL);
959 	}
960 
961 	kn->kn_hook = sc;
962 
963 	APM_LOCK(sc);
964 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
965 	APM_UNLOCK(sc);
966 
967 	return (0);
968 }
969