/*	$NetBSD: apm.c,v 1.28 2012/09/30 21:36:19 dsl Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apm.c,v 1.28 2012/09/30 21:36:19 dsl Exp $");

#include "opt_apm.h"

#if defined(DEBUG) && !defined(APMDEBUG)
#define	APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/apm/apmvar.h>

#ifdef APMDEBUG
#define DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)


#ifdef APMDEBUG_VALUE
int	apmdebug = APMDEBUG_VALUE;
#else
int	apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define	DPRINTF(f, x)		/**/
#endif /* APMDEBUG */

#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APM(dev)	(minor(dev)&0x0f)
#define APM_NORMAL	0
#define APM_CTL	8

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This covers both the APM thread itself and
 * user context.
 */
#define	APM_LOCK(apmsc)						\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define	APM_UNLOCK(apmsc)						\
	(void) mutex_exit(&(apmsc)->sc_lock)

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *);
static void	apm_standby(struct apm_softc *);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

extern struct cfdriver apm_cd;

dev_type_open(apmopen);
dev_type_close(apmclose);
dev_type_ioctl(apmioctl);
dev_type_poll(apmpoll);
dev_type_kqfilter(apmkqfilter);

const struct cdevsw apm_cdevsw = {
	apmopen, apmclose, noread, nowrite, apmioctl,
	nostop, notty, apmpoll, nommap, apmkqfilter, D_OTHER,
};

/* configurable variables */
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int	apm_v11_enabled = 0;
#else
int	apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int	apm_v12_enabled = 0;
#else
int	apm_v12_enabled = 1;
#endif

/* variables used during operation (XXX cgd) */
u_char	apm_majver, apm_minver;
int	apm_inited;
int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int	apm_damn_fool_bios, apm_op_inprog;
int	apm_evindex;

static int apm_spl;		/* saved spl while suspended */

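/*
 * Translate an APM error code into a human-readable string.
 */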
const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}

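/*
 * Print a driver warning followed by the decoded APM error code.
 */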
static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap);			/* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
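/*
 * Print a summary of the current power status (battery life, A/C state,
 * battery charge state) on the console.
 */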
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		aprint_normal_dev(sc->sc_dev,
		    "battery life expectancy: %d%%\n",
		    pi->battery_life);
	}
	aprint_normal_dev(sc->sc_dev, "A/C state: ");
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	aprint_normal_dev(sc->sc_dev, "battery charge state:");
	if (apm_minver == 0)
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	else if (apm_minver >= 1) {
		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
			printf(" no battery");
		else {
			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
				printf(" high");
			if (pi->battery_flags & APM_BATT_FLAG_LOW)
				printf(" low");
			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
				printf(" critical");
			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
				printf(" charging");
		}
		printf("\n");
		if (pi->minutes_valid) {
			aprint_normal_dev(sc->sc_dev, "estimated ");
			if (pi->minutes_left / 60)
				printf("%dh ", pi->minutes_left / 60);
			printf("%dm\n", pi->minutes_left % 60);
		}
	}
	return;
}
#endif

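/*
 * Put the system into the APM "suspend" state: run the PMF suspend hooks
 * (unless the back-end asked us not to), raise the spl, and ask the BIOS
 * to suspend all devices.  apm_resume() is called when the set-power-state
 * call returns.
 */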
static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_Q_NONE);
		apm_spl = splhigh();
	}

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
	else
		apm_resume(sc, APM_SYS_STANDBY_RESUME, 0);
}

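/*
 * Like apm_suspend(), but request the lighter-weight "standby" state.
 */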
static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_Q_NONE);
		apm_spl = splhigh();
	}
	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
	else
		apm_resume(sc, APM_SYS_STANDBY_RESUME, 0);
}

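/*
 * Come back from standby or suspend: restore the time of day and the spl,
 * run the PMF resume hooks and queue an event for any listening process.
 */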
static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{
	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#ifdef TIMER_FREQ
	/*
	 * Some systems require the clock to be reinitialized after
	 * hibernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		splx(apm_spl);
		pmf_system_resume(PMF_Q_NONE);
	}

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;			/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

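/*
 * Handle a single event reported by the APM BIOS.  Requests are either
 * queued for a user-level daemon (if one has the device open) or handled
 * directly by the kernel.
 */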
static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_op_inprog) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_op_inprog) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
		}
		break;

	default:
		switch (event_code >> 8) {
			case 0:
				code = "reserved system";
				break;
			case 1:
				code = "reserved device";
				break;
			case 2:
				code = "OEM defined";
				break;
			default:
				code = "reserved";
				break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

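/*
 * Fetch and handle all pending APM events, then perform any suspend or
 * standby that was requested while processing them.
 */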
static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;


	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

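/*
 * Pick the APM version to conform to, subject to the apm_v11_enabled and
 * apm_v12_enabled configuration knobs, and announce it.
 */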
static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
}

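/*
 * Allow only one APM attachment.
 */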
int
apm_match(void)
{
	static int got;
	return !got++;
}

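/*
 * Machine-independent part of the attachment: pick the protocol version,
 * enable power management and start the event thread.
 */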
void
apm_attach(struct apm_softc *sc)
{
	u_int numbatts, capflags;

	aprint_normal(": ");

	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}

	if (!pmf_device_register(sc->sc_dev, NULL, NULL))
		aprint_error_dev(sc->sc_dev, "couldn't establish power handler\n");
}

void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
		(void) tsleep(apmsc, PWAIT, "apmev",  (8 * hz) / 7);
	}
}

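/*
 * Open one of the two minor devices: the normal minor is the read-only
 * event interface, the control minor is the exclusive read/write control
 * interface.
 */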
int
apmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmclose(dev_t dev, int flag, int mode,
	struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

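/*
 * Handle the APM ioctls: standby/suspend requests, device power control,
 * event retrieval and power status queries.
 */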
int
apmioctl(dev_t dev, u_long cmd, void *data, int flag,
	struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;
	struct apm_ctl *actl;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_DEV_CTL:
		actl = (struct apm_ctl *)data;
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
#if 0
		apm_get_powstate(actl->dev); /* XXX */
#endif
		error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, actl->dev,
		    actl->mode);
		apm_suspends++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

	case OAPM_IOC_GETPOWER:
	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}

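/*
 * Poll for pending events on the read side.
 */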
int
apmpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

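/*
 * kqueue(2) read filter: the knote is attached to the read selinfo and
 * reports readiness while events are queued.
 */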
static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
	{ 1, NULL, filt_apmrdetach, filt_apmread };

int
apmkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rsel.sel_klist;
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	APM_UNLOCK(sc);

	return (0);
}