/*	$NetBSD: apm.c,v 1.30 2014/03/16 05:20:26 dholland Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apm.c,v 1.30 2014/03/16 05:20:26 dholland Exp $");

#include "opt_apm.h"

#if defined(DEBUG) && !defined(APMDEBUG)
#define	APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/apm/apmvar.h>

#ifdef APMDEBUG
#define DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)


#ifdef APMDEBUG_VALUE
int	apmdebug = APMDEBUG_VALUE;
#else
int	apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define	DPRINTF(f, x)		/**/
#endif /* APMDEBUG */

#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

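/*
 * Minor number layout: the low bits of the minor select the flavour of
 * the device, APM_NORMAL (0) for the read-only status device or APM_CTL
 * (8) for the read/write control device (traditionally /dev/apm and
 * /dev/apmctl respectively).
 */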
#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APM(dev)	(minor(dev)&0x0f)
#define APM_NORMAL	0
#define APM_CTL	8

/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This is both the APM thread itself, as well as
 * user context.
 */
#define	APM_LOCK(apmsc)						\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define	APM_UNLOCK(apmsc)						\
	(void) mutex_exit(&(apmsc)->sc_lock)

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *);
static void	apm_standby(struct apm_softc *);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

extern struct cfdriver apm_cd;

dev_type_open(apmopen);
dev_type_close(apmclose);
dev_type_ioctl(apmioctl);
dev_type_poll(apmpoll);
dev_type_kqfilter(apmkqfilter);

const struct cdevsw apm_cdevsw = {
	.d_open = apmopen,
	.d_close = apmclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = apmioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = apmpoll,
	.d_mmap = nommap,
	.d_kqfilter = apmkqfilter,
	.d_flag = D_OTHER,
};

/* configurable variables */
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int	apm_v11_enabled = 0;
#else
int	apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int	apm_v12_enabled = 0;
#else
int	apm_v12_enabled = 1;
#endif

/* variables used during operation (XXX cgd) */
u_char	apm_majver, apm_minver;
int	apm_inited;
int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int	apm_damn_fool_bios, apm_op_inprog;
int	apm_evindex;

static int apm_spl;		/* saved spl while suspended */

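/*
 * Map an APM error code reported by the back-end to a human-readable string.
 */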
const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}

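/*
 * printf()-like diagnostic that appends the decoded APM error code.
 */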
static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap);			/* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		aprint_normal_dev(sc->sc_dev,
		    "battery life expectancy: %d%%\n",
		    pi->battery_life);
	}
	aprint_normal_dev(sc->sc_dev, "A/C state: ");
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	aprint_normal_dev(sc->sc_dev, "battery charge state:");
	if (apm_minver == 0)
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	else if (apm_minver >= 1) {
		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
			printf(" no battery");
		else {
			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
				printf(" high");
			if (pi->battery_flags & APM_BATT_FLAG_LOW)
				printf(" low");
			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
				printf(" critical");
			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
				printf(" charging");
		}
		printf("\n");
		if (pi->minutes_valid) {
			aprint_normal_dev(sc->sc_dev, "estimated ");
			if (pi->minutes_left / 60)
				printf("%dh ", pi->minutes_left / 60);
			printf("%dm\n", pi->minutes_left % 60);
		}
	}
	return;
}
#endif

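/*
 * Put the system into the APM "suspend" state, running the PMF suspend
 * hooks first unless the back-end asked us not to.
 */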
static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_Q_NONE);
		apm_spl = splhigh();
	}

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
	else
		apm_resume(sc, APM_SYS_STANDBY_RESUME, 0);
}

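/*
 * Put the system into the APM "standby" state; mirrors apm_suspend().
 */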
static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		pmf_system_suspend(PMF_Q_NONE);
		apm_spl = splhigh();
	}
	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
	else
		apm_resume(sc, APM_SYS_STANDBY_RESUME, 0);
}

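/*
 * Return to the running state: resynchronize the clock, run the PMF
 * resume hooks and record the resume event for userland.
 */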
static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{
	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#ifdef TIMER_FREQ
	/*
	 * Some systems require the clock to be reinitialized after
	 * hibernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	if (!(sc->sc_hwflags & APM_F_DONT_RUN_HOOKS)) {
		splx(apm_spl);
		pmf_system_resume(PMF_Q_NONE);
	}

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;			/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

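/*
 * Handle a single event reported by the APM back-end, either by queueing
 * it for userland or by acting on it directly.
 */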
static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_op_inprog) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_op_inprog) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#else
		__USE(error);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
		}
		break;

	default:
		switch (event_code >> 8) {
			case 0:
				code = "reserved system";
				break;
			case 1:
				code = "reserved device";
				break;
			case 2:
				code = "OEM defined";
				break;
			default:
				code = "reserved";
				break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

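/*
 * Poll the back-end for pending APM events and carry out any suspend or
 * standby request that resulted from them.
 */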
static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;


	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

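/*
 * Decide which APM specification version to run at, honouring the
 * apm_v11_enabled/apm_v12_enabled knobs, and announce it.
 */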
static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
}

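/*
 * There can be only one APM instance; match the first and refuse the rest.
 */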
int
apm_match(void)
{
	static int got;
	return !got++;
}

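/*
 * Common attachment code: negotiate the APM version, enable power
 * management and start the event thread.
 */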
void
apm_attach(struct apm_softc *sc)
{
	u_int numbatts, capflags;

	aprint_normal(": ");

	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}

	if (!pmf_device_register(sc->sc_dev, NULL, NULL))
		aprint_error_dev(sc->sc_dev, "couldn't establish power handler\n");
}

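/*
 * Kernel thread body: poll for APM events roughly once a second.
 */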
void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
		(void) tsleep(apmsc, PWAIT, "apmev",  (8 * hz) / 7);
	}
}

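/*
 * Open: the control minor (APM_CTL) requires write access and allows
 * only one writer at a time; the normal minor (APM_NORMAL) must be
 * opened read-only.
 */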
int
apmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

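/*
 * Close: clear the open flag for this minor and, once both minors are
 * closed, discard any queued events.
 */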
int
apmclose(dev_t dev, int flag, int mode,
	struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

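/*
 * ioctl interface: standby and suspend requests, per-device power
 * control, event retrieval and power status queries.
 */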
int
apmioctl(dev_t dev, u_long cmd, void *data, int flag,
	struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
	int i, error = 0;
	int batt_flags;
	struct apm_ctl *actl;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_DEV_CTL:
		actl = (struct apm_ctl *)data;
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
#if 0
		apm_get_powstate(actl->dev); /* XXX */
#endif
		error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, actl->dev,
		    actl->mode);
		apm_suspends++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

	case OAPM_IOC_GETPOWER:
	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}

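/*
 * poll(2) support: readable when at least one event is queued.
 */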
int
apmpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

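/*
 * kqueue(2) read filter: the knote is active while events are queued.
 */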
static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
	{ 1, NULL, filt_apmrdetach, filt_apmread };

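/*
 * Attach a knote to the event queue's klist.
 */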
int
apmkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apm_cd, APMUNIT(dev));
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rsel.sel_klist;
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	APM_UNLOCK(sc);

	return (0);
}