/*	$NetBSD: apmdev.c,v 1.23 2009/04/03 05:01:10 uwe Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apmdev.c,v 1.23 2009/04/03 05:01:10 uwe Exp $");

#ifdef _KERNEL_OPT
#include "opt_apm.h"
#endif

#ifdef APM_NOIDLE
#error APM_NOIDLE option deprecated; use APM_NO_IDLE instead
#endif

#if defined(DEBUG) && !defined(APMDEBUG)
#define	APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/hpc/apm/apmvar.h>

#include <machine/stdarg.h>

#ifdef APMDEBUG
#define DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)


#ifdef APMDEBUG_VALUE
int	apmdebug = APMDEBUG_VALUE;
#else
int	apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define	DPRINTF(f, x)		/**/
#endif /* APMDEBUG */

#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APM(dev)	(minor(dev)&0x0f)
#define APM_NORMAL	0
#define APM_CTL	8
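
/*
 * The low bits of the minor number (APM()) select the flavour of the
 * device that was opened: the normal, read-only event device (APM_NORMAL,
 * traditionally /dev/apm) or the control device (APM_CTL, traditionally
 * /dev/apmctl).  APMUNIT() extracts the bits passed to
 * device_lookup_private() to locate the unit's softc.
 */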

/*
 * A brief note on the locking protocol: it's very simple; we
 * take an exclusive lock any time thread context enters the
 * APM module.  This covers both the APM thread itself and
 * user context.
 */
#define	APM_LOCK(apmsc)						\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define	APM_UNLOCK(apmsc)						\
	(void) mutex_exit(&(apmsc)->sc_lock)
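
/*
 * Sketch of the resulting pattern, as used by the kernel thread and the
 * cdev entry points below:
 *
 *	APM_LOCK(sc);
 *	... examine/modify softc state, call the backend ops ...
 *	APM_UNLOCK(sc);
 */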

static void	apmdevattach(device_t, device_t, void *);
static int	apmdevmatch(device_t, cfdata_t, void *);

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *);
static void	apm_standby(struct apm_softc *);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

CFATTACH_DECL_NEW(apmdev, sizeof(struct apm_softc),
    apmdevmatch, apmdevattach, NULL, NULL);

extern struct cfdriver apmdev_cd;

dev_type_open(apmdevopen);
dev_type_close(apmdevclose);
dev_type_ioctl(apmdevioctl);
dev_type_poll(apmdevpoll);
dev_type_kqfilter(apmdevkqfilter);

const struct cdevsw apmdev_cdevsw = {
	apmdevopen, apmdevclose, noread, nowrite, apmdevioctl,
	nostop, notty, apmdevpoll, nommap, apmdevkqfilter, D_OTHER
};
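
/*
 * Note that the device provides no read/write entry points (noread and
 * nowrite above); userland obtains events through the APM_IOC_NEXTEVENT
 * ioctl and learns of their arrival via poll(2) or kqueue(2).
 */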

/* configurable variables */
int	apm_bogus_bios = 0;
#ifdef APM_DISABLE
int	apm_enabled = 0;
#else
int	apm_enabled = 1;
#endif
#ifdef APM_NO_IDLE
int	apm_do_idle = 0;
#else
int	apm_do_idle = 1;
#endif
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int	apm_v11_enabled = 0;
#else
int	apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int	apm_v12_enabled = 0;
#else
int	apm_v12_enabled = 1;
#endif
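
/*
 * The defaults above are compile-time policy knobs; each can be flipped
 * with the corresponding kernel option (APM_DISABLE, APM_NO_IDLE,
 * APM_NO_STANDBY, APM_V10_ONLY, APM_NO_V12), picked up via opt_apm.h.
 */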

/* variables used during operation (XXX cgd) */
u_char	apm_majver, apm_minver;
int	apm_inited;
int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int	apm_damn_fool_bios, apm_op_inprog;
int	apm_evindex;

static int apm_spl;		/* saved spl while suspended */

const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}

static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap);			/* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		aprint_normal_dev(sc->sc_dev,
		    "battery life expectancy: %d%%\n",
		    pi->battery_life);
	}
	aprint_normal_dev(sc->sc_dev, "A/C state: ");
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	aprint_normal_dev(sc->sc_dev, "battery charge state:");
	if (apm_minver == 0)
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	else if (apm_minver >= 1) {
		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
			printf(" no battery");
		else {
			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
				printf(" high");
			if (pi->battery_flags & APM_BATT_FLAG_LOW)
				printf(" low");
			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
				printf(" critical");
			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
				printf(" charging");
		}
		printf("\n");
		if (pi->minutes_valid) {
			aprint_normal_dev(sc->sc_dev, "estimated ");
			if (pi->minutes_left / 60)
				printf("%dh ", pi->minutes_left / 60);
			printf("%dm\n", pi->minutes_left % 60);
		}
	}
	return;
}
#endif

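/*
 * Suspend/standby sequence (mirrored in apm_suspend() and apm_standby()
 * below): run the "soft" power hooks, give drivers a moment to settle,
 * raise the spl, run the "hard" power hooks, and finally ask the APM
 * backend to put the system into the requested state.  If the backend
 * call fails we immediately resume so the hooks are unwound.
 */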
static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	dopowerhooks(PWR_SOFTSUSPEND);
	(void) tsleep(sc, PWAIT, "apmsuspend",  hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_SUSPEND);

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
}

static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	dopowerhooks(PWR_SOFTSTANDBY);
	(void) tsleep(sc, PWAIT, "apmstandby",  hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_STANDBY);

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
}

static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{

	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#if 0 /* XXX: def TIMER_FREQ */
	/*
	 * Some systems require the clock to be reinitialized after
	 * hibernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	dopowerhooks(PWR_RESUME);

	splx(apm_spl);

	dopowerhooks(PWR_SOFTRESUME);

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;			/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}
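
/*
 * sc_event_list is used as a fixed-size ring of APM_NEVENTS entries:
 * sc_event_ptr is the next slot to write and sc_event_count the number
 * of unread entries, so the oldest unread event lives at index
 * (sc_event_ptr + APM_NEVENTS - sc_event_count) % APM_NEVENTS, which is
 * exactly what the APM_IOC_NEXTEVENT ioctl below computes.
 */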

static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
		}
		break;

	default:
		switch (event_code >> 8) {
			case 0:
				code = "reserved system";
				break;
			case 1:
				code = "reserved device";
				break;
			case 2:
				code = "OEM defined";
				break;
			default:
				code = "reserved";
				break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

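/*
 * apm_periodic_check() is the heart of the driver: it acknowledges any
 * request still in progress, drains all pending events from the backend
 * via aa_get_event() and dispatches them, then performs whatever suspend
 * or standby was accumulated.  It is called once from apm_attach() and
 * then repeatedly from the kernel thread below.
 */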
static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;


	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
	if (sc->sc_detail & APM_IDLE_SLOWS) {
#ifdef DIAGNOSTIC
		/* not relevant often */
		aprint_normal(" (slowidle)");
#endif
		/* leave apm_do_idle at its user-configured setting */
	} else
		apm_do_idle = 0;
#ifdef DIAGNOSTIC
	if (sc->sc_detail & APM_BIOS_PM_DISABLED)
		aprint_normal(" (BIOS mgmt disabled)");
	if (sc->sc_detail & APM_BIOS_PM_DISENGAGED)
		aprint_normal(" (BIOS managing devices)");
#endif
}

static int
apmdevmatch(device_t parent, cfdata_t match, void *aux)
{

	return apm_match();
}

static void
apmdevattach(device_t parent, device_t self, void *aux)
{
	struct apm_softc *sc;
	struct apmdev_attach_args *aaa = aux;

	sc = device_private(self);
	sc->sc_dev = self;

	sc->sc_detail = aaa->apm_detail;
	sc->sc_vers = aaa->apm_detail & 0xffff; /* XXX: magic */

	sc->sc_ops = aaa->accessops;
	sc->sc_cookie = aaa->accesscookie;

	apm_attach(sc);
}

/*
 * Print function (for parent devices).
 */
int
apmprint(void *aux, const char *pnp)
{
	if (pnp)
		aprint_normal("apm at %s", pnp);

	return (UNCONF);
}

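/*
 * apm_match() deliberately succeeds only once: whichever bus front-end
 * probes first gets the (single) apm device, and later matches return 0.
 */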
int
apm_match(void)
{
	static int got;
	return !got++;
}

void
apm_attach(struct apm_softc *sc)
{
	struct apm_power_info pinfo;
	u_int numbatts, capflags;
	int error;

	aprint_naive("\n");
	aprint_normal(": ");

	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pinfo);
	if (error == 0) {
#ifdef APM_POWER_PRINT
		apm_power_print(sc, &pinfo);
#endif
	} else
		apm_perror("get power status", error);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}
}

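/*
 * Kernel thread body: run apm_periodic_check() under the softc lock,
 * then sleep for (8 * hz) / 7 ticks (a little over a second) before
 * checking again.
 */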
void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
		(void) tsleep(apmsc, PWAIT, "apmev",  (8 * hz) / 7);
	}
}

int
apmdevopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmdevclose(dev_t dev, int flag, int mode,
	    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

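/*
 * Supported ioctls: APM_IOC_STANDBY and APM_IOC_SUSPEND queue a request
 * that the next periodic check acts on; APM_IOC_NEXTEVENT dequeues the
 * oldest recorded event; APM_IOC_GETPOWER (and its old variant) queries
 * the backend and, for APM 1.1 and later, synthesizes battery_state from
 * the battery flags.
 */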
int
apmdevioctl(dev_t dev, u_long cmd, void *data, int flag,
	    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

	case OAPM_IOC_GETPOWER:
	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}

int
apmdevpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
	{ 1, NULL, filt_apmrdetach, filt_apmread };

int
apmdevkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rsel.sel_klist;
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	APM_UNLOCK(sc);

	return (0);
}
974