/*	$NetBSD: apmdev.c,v 1.28 2013/11/09 02:44:52 christos Exp $ */

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John Kohl and Christopher G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: apmdev.c,v 1.28 2013/11/09 02:44:52 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_apm.h"
#endif

#if defined(DEBUG) && !defined(APMDEBUG)
#define	APMDEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/conf.h>

#include <dev/hpc/apm/apmvar.h>

#ifdef APMDEBUG
#define DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)


#ifdef APMDEBUG_VALUE
int	apmdebug = APMDEBUG_VALUE;
#else
int	apmdebug = 0;
#endif /* APMDEBUG_VALUE */

#else
#define	DPRINTF(f, x)		/**/
#endif /* APMDEBUG */

#define	SCFLAG_OREAD	0x0000001
#define	SCFLAG_OWRITE	0x0000002
#define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)

#define	APMUNIT(dev)	(minor(dev)&0xf0)
#define	APM(dev)	(minor(dev)&0x0f)
#define APM_NORMAL	0
#define APM_CTL	8
/*
 * A brief note on the locking protocol: it's very simple; we
 * assert an exclusive lock any time thread context enters the
 * APM module.  This covers both the APM thread itself and
 * user context.
 */
#define	APM_LOCK(apmsc)						\
	(void) mutex_enter(&(apmsc)->sc_lock)
#define	APM_UNLOCK(apmsc)						\
	(void) mutex_exit(&(apmsc)->sc_lock)

static void	apmdevattach(device_t, device_t, void *);
static int	apmdevmatch(device_t, cfdata_t, void *);

static void	apm_event_handle(struct apm_softc *, u_int, u_int);
static void	apm_periodic_check(struct apm_softc *);
static void	apm_thread(void *);
static void	apm_perror(const char *, int, ...)
		    __attribute__((__format__(__printf__,1,3)));
#ifdef APM_POWER_PRINT
static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
#endif
static int	apm_record_event(struct apm_softc *, u_int);
static void	apm_set_ver(struct apm_softc *);
static void	apm_standby(struct apm_softc *);
static void	apm_suspend(struct apm_softc *);
static void	apm_resume(struct apm_softc *, u_int, u_int);

CFATTACH_DECL_NEW(apmdev, sizeof(struct apm_softc),
    apmdevmatch, apmdevattach, NULL, NULL);

extern struct cfdriver apmdev_cd;

dev_type_open(apmdevopen);
dev_type_close(apmdevclose);
dev_type_ioctl(apmdevioctl);
dev_type_poll(apmdevpoll);
dev_type_kqfilter(apmdevkqfilter);

const struct cdevsw apmdev_cdevsw = {
	apmdevopen, apmdevclose, noread, nowrite, apmdevioctl,
	nostop, notty, apmdevpoll, nommap, apmdevkqfilter, D_OTHER
};

/* configurable variables */
int	apm_bogus_bios = 0;
#ifdef APM_NO_STANDBY
int	apm_do_standby = 0;
#else
int	apm_do_standby = 1;
#endif
#ifdef APM_V10_ONLY
int	apm_v11_enabled = 0;
#else
int	apm_v11_enabled = 1;
#endif
#ifdef APM_NO_V12
int	apm_v12_enabled = 0;
#else
int	apm_v12_enabled = 1;
#endif

/* variables used during operation (XXX cgd) */
u_char	apm_majver, apm_minver;
int	apm_inited;
int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
int	apm_damn_fool_bios, apm_op_inprog;
int	apm_evindex;

static int apm_spl;		/* saved spl while suspended */

const char *
apm_strerror(int code)
{
	switch (code) {
	case APM_ERR_PM_DISABLED:
		return ("power management disabled");
	case APM_ERR_REALALREADY:
		return ("real mode interface already connected");
	case APM_ERR_NOTCONN:
		return ("interface not connected");
	case APM_ERR_16ALREADY:
		return ("16-bit interface already connected");
	case APM_ERR_16NOTSUPP:
		return ("16-bit interface not supported");
	case APM_ERR_32ALREADY:
		return ("32-bit interface already connected");
	case APM_ERR_32NOTSUPP:
		return ("32-bit interface not supported");
	case APM_ERR_UNRECOG_DEV:
		return ("unrecognized device ID");
	case APM_ERR_ERANGE:
		return ("parameter out of range");
	case APM_ERR_NOTENGAGED:
		return ("interface not engaged");
	case APM_ERR_UNABLE:
		return ("unable to enter requested state");
	case APM_ERR_NOEVENTS:
		return ("no pending events");
	case APM_ERR_NOT_PRESENT:
		return ("no APM present");
	default:
		return ("unknown error code");
	}
}

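/*
 * apm_perror: print "APM <formatted message>: <decoded error string>"
 * on the console for the given APM error code.
 */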
static void
apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
{
	va_list ap;

	printf("APM ");

	va_start(ap, errinfo);
	vprintf(str, ap);			/* XXX cgd */
	va_end(ap);

	printf(": %s\n", apm_strerror(errinfo));
}

#ifdef APM_POWER_PRINT
static void
apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
{

	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
		aprint_normal_dev(sc->sc_dev,
		    "battery life expectancy: %d%%\n",
		    pi->battery_life);
	}
	aprint_normal_dev(sc->sc_dev, "A/C state: ");
	switch (pi->ac_state) {
	case APM_AC_OFF:
		printf("off\n");
		break;
	case APM_AC_ON:
		printf("on\n");
		break;
	case APM_AC_BACKUP:
		printf("backup power\n");
		break;
	default:
	case APM_AC_UNKNOWN:
		printf("unknown\n");
		break;
	}
	aprint_normal_dev(sc->sc_dev, "battery charge state:");
	if (apm_minver == 0)
		switch (pi->battery_state) {
		case APM_BATT_HIGH:
			printf("high\n");
			break;
		case APM_BATT_LOW:
			printf("low\n");
			break;
		case APM_BATT_CRITICAL:
			printf("critical\n");
			break;
		case APM_BATT_CHARGING:
			printf("charging\n");
			break;
		case APM_BATT_UNKNOWN:
			printf("unknown\n");
			break;
		default:
			printf("undecoded state %x\n", pi->battery_state);
			break;
		}
	else if (apm_minver >= 1) {
		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
			printf(" no battery");
		else {
			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
				printf(" high");
			if (pi->battery_flags & APM_BATT_FLAG_LOW)
				printf(" low");
			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
				printf(" critical");
			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
				printf(" charging");
		}
		printf("\n");
		if (pi->minutes_valid) {
			aprint_normal_dev(sc->sc_dev, "estimated ");
			if (pi->minutes_left / 60)
				printf("%dh ", pi->minutes_left / 60);
			printf("%dm\n", pi->minutes_left % 60);
		}
	}
	return;
}
#endif

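/*
 * apm_suspend: run the SOFTSUSPEND and SUSPEND power hooks, raise the
 * spl, and ask the BIOS to put all devices into the suspend state.
 * If the BIOS call fails, resume immediately.
 */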
static void
apm_suspend(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_SUSPEND) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_suspend: already suspended?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_SUSPEND;

	dopowerhooks(PWR_SOFTSUSPEND);
	(void) tsleep(sc, PWAIT, "apmsuspend",  hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_SUSPEND);

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_SUSPEND);

	if (error)
		apm_resume(sc, 0, 0);
}

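/*
 * apm_standby: like apm_suspend, but enters the (lighter) standby
 * state instead; resumes immediately if the BIOS call fails.
 */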
static void
apm_standby(struct apm_softc *sc)
{
	int error;

	if (sc->sc_power_state == PWR_STANDBY) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev,
		    "apm_standby: already standing by?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_STANDBY;

	dopowerhooks(PWR_SOFTSTANDBY);
	(void) tsleep(sc, PWAIT, "apmstandby",  hz/2);

	apm_spl = splhigh();

	dopowerhooks(PWR_STANDBY);

	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
	    APM_SYS_STANDBY);
	if (error)
		apm_resume(sc, 0, 0);
}

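/*
 * apm_resume: restore the time-of-day clock, run the RESUME power
 * hooks, restore the saved spl, run the SOFTRESUME hooks, and record
 * the resume event for any listener.
 */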
static void
apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
{

	if (sc->sc_power_state == PWR_RESUME) {
#ifdef APMDEBUG
		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
#endif
		return;
	}
	sc->sc_power_state = PWR_RESUME;

#if 0 /* XXX: def TIME_FREQ */
	/*
	 * Some systems require their clock to be initialized after
	 * hibernation.
	 */
	initrtclock(TIMER_FREQ);
#endif

	inittodr(time_second);
	dopowerhooks(PWR_RESUME);

	splx(apm_spl);

	dopowerhooks(PWR_SOFTRESUME);

	apm_record_event(sc, event_type);
}

/*
 * return 0 if the user will notice and handle the event,
 * return 1 if the kernel driver should do so.
 */
static int
apm_record_event(struct apm_softc *sc, u_int event_type)
{
	struct apm_event_info *evp;

	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return 1;		/* no user waiting */
	if (sc->sc_event_count == APM_NEVENTS)
		return 1;			/* overflow */
	evp = &sc->sc_event_list[sc->sc_event_ptr];
	sc->sc_event_count++;
	sc->sc_event_ptr++;
	sc->sc_event_ptr %= APM_NEVENTS;
	evp->type = event_type;
	evp->index = ++apm_evindex;
	selnotify(&sc->sc_rsel, 0, 0);
	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
}

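/*
 * apm_event_handle: dispatch a single BIOS event code: queue
 * standby/suspend requests, resume the system, report power status
 * changes, and log any unrecognized event codes.
 */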
static void
apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
{
	int error;
	const char *code;
	struct apm_power_info pi;

	switch (event_code) {
	case APM_USER_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
		if (apm_do_standby) {
			if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
				apm_userstandbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_STANDBY_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_do_standby) {
			if (apm_op_inprog == 0 &&
			    apm_record_event(sc, event_code))
				apm_standbys++;
			apm_op_inprog++;
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		} else {
			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
			/* in case BIOS hates being spurned */
			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
		}
		break;

	case APM_USER_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
		if (apm_standbys || apm_suspends) {
			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
			    ("damn fool BIOS did not wait for answer\n"));
			/* just give up the fight */
			apm_damn_fool_bios = 1;
		}
		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
			apm_suspends++;
		apm_op_inprog++;
		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
		break;

	case APM_POWER_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
#ifdef APM_POWER_PRINT
		/* only print if nobody is catching events. */
		if (error == 0 &&
		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
			apm_power_print(sc, &pi);
#else
		__USE(error);
#endif
		apm_record_event(sc, event_code);
		break;

	case APM_NORMAL_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_SYS_STANDBY_RESUME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_UPDATE_TIME:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
		apm_resume(sc, event_code, event_info);
		break;

	case APM_CRIT_SUSPEND_REQ:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
		apm_record_event(sc, event_code);
		apm_suspend(sc);
		break;

	case APM_BATTERY_LOW:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
		apm_battlow++;
		apm_record_event(sc, event_code);
		break;

	case APM_CAP_CHANGE:
		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
		if (apm_minver < 2) {
			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
		} else {
			u_int numbatts, capflags;
			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
			    &numbatts, &capflags);
			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
		}
		break;

	default:
		switch (event_code >> 8) {
			case 0:
				code = "reserved system";
				break;
			case 1:
				code = "reserved device";
				break;
			case 2:
				code = "OEM defined";
				break;
			default:
				code = "reserved";
				break;
		}
		printf("APM: %s event code %x\n", code, event_code);
	}
}

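/*
 * apm_periodic_check: drain pending BIOS events and act on any
 * suspend/standby requests accumulated while handling them.  Called
 * from the APM kernel thread and once at attach time.
 */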
static void
apm_periodic_check(struct apm_softc *sc)
{
	int error;
	u_int event_code, event_info;


	/*
	 * tell the BIOS we're working on it, if asked to do a
	 * suspend/standby
	 */
	if (apm_op_inprog)
		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
		    APM_LASTREQ_INPROG);

	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
	    &event_info)) == 0 && !apm_damn_fool_bios)
		apm_event_handle(sc, event_code, event_info);

	if (error != APM_ERR_NOEVENTS)
		apm_perror("get event", error);
	if (apm_suspends) {
		apm_op_inprog = 0;
		apm_suspend(sc);
	} else if (apm_standbys || apm_userstandbys) {
		apm_op_inprog = 0;
		apm_standby(sc);
	}
	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
	apm_damn_fool_bios = 0;
}

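/*
 * apm_set_ver: derive the APM version to run at (1.0, 1.1 or 1.2)
 * from the advertised version and the apm_v1[12]_enabled knobs,
 * and announce it.
 */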
static void
apm_set_ver(struct apm_softc *sc)
{

	if (apm_v12_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 2) {
		apm_majver = 1;
		apm_minver = 2;
		goto ok;
	}

	if (apm_v11_enabled &&
	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
	    APM_MINOR_VERS(sc->sc_vers) == 1) {
		apm_majver = 1;
		apm_minver = 1;
	} else {
		apm_majver = 1;
		apm_minver = 0;
	}
ok:
	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
	apm_inited = 1;
}

static int
apmdevmatch(device_t parent, cfdata_t match, void *aux)
{

	return apm_match();
}

static void
apmdevattach(device_t parent, device_t self, void *aux)
{
	struct apm_softc *sc;
	struct apmdev_attach_args *aaa = aux;

	sc = device_private(self);
	sc->sc_dev = self;

	sc->sc_detail = aaa->apm_detail;
	sc->sc_vers = aaa->apm_detail & 0xffff; /* XXX: magic */

	sc->sc_ops = aaa->accessops;
	sc->sc_cookie = aaa->accesscookie;

	apm_attach(sc);
}

/*
 * Print function (for parent devices).
 */
int
apmprint(void *aux, const char *pnp)
{
	if (pnp)
		aprint_normal("apm at %s", pnp);

	return (UNCONF);
}

int
apm_match(void)
{
	static int got;
	return !got++;
}

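/*
 * apm_attach: common attachment code.  Negotiate the APM version,
 * enable power management, report the initial power status, and
 * start the event-polling kernel thread.
 */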
void
apm_attach(struct apm_softc *sc)
{
	struct apm_power_info pinfo;
	u_int numbatts, capflags;
	int error;

	aprint_naive("\n");
	aprint_normal(": ");

	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pinfo);
	if (error == 0) {
#ifdef APM_POWER_PRINT
		apm_power_print(sc, &pinfo);
#endif
	} else
		apm_perror("get power status", error);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}
}

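/*
 * apm_thread: kernel thread body; polls the BIOS for events roughly
 * once a second, under the driver lock.
 */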
static void
apm_thread(void *arg)
{
	struct apm_softc *apmsc = arg;

	/*
	 * Loop forever, doing a periodic check for APM events.
	 */
	for (;;) {
		APM_LOCK(apmsc);
		apm_periodic_check(apmsc);
		APM_UNLOCK(apmsc);
		(void) tsleep(apmsc, PWAIT, "apmev",  (8 * hz) / 7);
	}
}

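/*
 * apmdevopen: open the control (APM_CTL, write) or normal (read-only)
 * minor device; only one writer is allowed at a time.
 */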
int
apmdevopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int ctl = APM(dev);
	int error = 0;
	struct apm_softc *sc;

	sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	if (!sc)
		return ENXIO;

	if (!apm_inited)
		return ENXIO;

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		if (!(flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		if (sc->sc_flags & SCFLAG_OWRITE) {
			error = EBUSY;
			break;
		}
		sc->sc_flags |= SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		if (!(flag & FREAD) || (flag & FWRITE)) {
			error = EINVAL;
			break;
		}
		sc->sc_flags |= SCFLAG_OREAD;
		break;
	default:
		error = ENXIO;
		break;
	}
	APM_UNLOCK(sc);

	return (error);
}

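/*
 * apmdevclose: clear the corresponding open flag and, once the device
 * is fully closed, discard any queued events.
 */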
int
apmdevclose(dev_t dev, int flag, int mode,
	    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	int ctl = APM(dev);

	DPRINTF(APMDEBUG_DEVICE,
	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));

	APM_LOCK(sc);
	switch (ctl) {
	case APM_CTL:
		sc->sc_flags &= ~SCFLAG_OWRITE;
		break;
	case APM_NORMAL:
		sc->sc_flags &= ~SCFLAG_OREAD;
		break;
	}
	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
		sc->sc_event_count = 0;
		sc->sc_event_ptr = 0;
	}
	APM_UNLOCK(sc);
	return 0;
}

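/*
 * apmdevioctl: handle APM_IOC_STANDBY/SUSPEND requests from userland,
 * dequeue events via APM_IOC_NEXTEVENT, and translate battery flags
 * into a battery state for APM_IOC_GETPOWER.
 */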
int
apmdevioctl(dev_t dev, u_long cmd, void *data, int flag,
	    struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	struct apm_power_info *powerp;
	struct apm_event_info *evp;
#if 0
	struct apm_ctl *actl;
#endif
	int i, error = 0;
	int batt_flags;

	APM_LOCK(sc);
	switch (cmd) {
	case APM_IOC_STANDBY:
		if (!apm_do_standby) {
			error = EOPNOTSUPP;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_userstandbys++;
		break;

	case APM_IOC_SUSPEND:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		apm_suspends++;
		break;

	case APM_IOC_NEXTEVENT:
		if (!sc->sc_event_count)
			error = EAGAIN;
		else {
			evp = (struct apm_event_info *)data;
			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
			i %= APM_NEVENTS;
			*evp = sc->sc_event_list[i];
			sc->sc_event_count--;
		}
		break;

	case OAPM_IOC_GETPOWER:
	case APM_IOC_GETPOWER:
		powerp = (struct apm_power_info *)data;
		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
		    powerp)) != 0) {
			apm_perror("ioctl get power status", error);
			error = EIO;
			break;
		}
		switch (apm_minver) {
		case 0:
			break;
		case 1:
		default:
			batt_flags = powerp->battery_flags;
			powerp->battery_state = APM_BATT_UNKNOWN;
			if (batt_flags & APM_BATT_FLAG_HIGH)
				powerp->battery_state = APM_BATT_HIGH;
			else if (batt_flags & APM_BATT_FLAG_LOW)
				powerp->battery_state = APM_BATT_LOW;
			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
				powerp->battery_state = APM_BATT_CRITICAL;
			else if (batt_flags & APM_BATT_FLAG_CHARGING)
				powerp->battery_state = APM_BATT_CHARGING;
			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
				powerp->battery_state = APM_BATT_ABSENT;
			break;
		}
		break;

	default:
		error = ENOTTY;
	}
	APM_UNLOCK(sc);

	return (error);
}

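/*
 * apmdevpoll: report the device readable when events are queued,
 * otherwise record the poller for a later selnotify().
 */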
int
apmdevpoll(dev_t dev, int events, struct lwp *l)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	int revents = 0;

	APM_LOCK(sc);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_event_count)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(l, &sc->sc_rsel);
	}
	APM_UNLOCK(sc);

	return (revents);
}

static void
filt_apmrdetach(struct knote *kn)
{
	struct apm_softc *sc = kn->kn_hook;

	APM_LOCK(sc);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	APM_UNLOCK(sc);
}

static int
filt_apmread(struct knote *kn, long hint)
{
	struct apm_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_event_count;
	return (kn->kn_data > 0);
}

static const struct filterops apmread_filtops =
	{ 1, NULL, filt_apmrdetach, filt_apmread };

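/*
 * apmdevkqfilter: attach an EVFILT_READ knote that fires while events
 * are queued (see filt_apmread above).
 */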
int
apmdevkqfilter(dev_t dev, struct knote *kn)
{
	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
	struct klist *klist;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rsel.sel_klist;
		kn->kn_fop = &apmread_filtops;
		break;

	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	APM_LOCK(sc);
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	APM_UNLOCK(sc);

	return (0);
}
945