1 /*	$NetBSD: apmdev.c,v 1.24 2009/11/23 02:13:45 rmind Exp $ */
2 
3 /*-
4  * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by John Kohl and Christopher G. Demetriou.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 /*
32  * from: sys/arch/i386/i386/apm.c,v 1.49 2000/05/08
33  */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: apmdev.c,v 1.24 2009/11/23 02:13:45 rmind Exp $");
37 
38 #ifdef _KERNEL_OPT
39 #include "opt_apm.h"
40 #endif
41 
42 #ifdef APM_NOIDLE
43 #error APM_NOIDLE option deprecated; use APM_NO_IDLE instead
44 #endif
45 
46 #if defined(DEBUG) && !defined(APMDEBUG)
47 #define	APMDEBUG
48 #endif
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/signalvar.h>
53 #include <sys/kernel.h>
54 #include <sys/proc.h>
55 #include <sys/kthread.h>
56 #include <sys/malloc.h>
57 #include <sys/device.h>
58 #include <sys/fcntl.h>
59 #include <sys/ioctl.h>
60 #include <sys/select.h>
61 #include <sys/poll.h>
62 #include <sys/conf.h>
63 
64 #include <dev/hpc/apm/apmvar.h>
65 
66 #include <machine/stdarg.h>
67 
68 #ifdef APMDEBUG
69 #define DPRINTF(f, x)		do { if (apmdebug & (f)) printf x; } while (0)
70 
71 
72 #ifdef APMDEBUG_VALUE
73 int	apmdebug = APMDEBUG_VALUE;
74 #else
75 int	apmdebug = 0;
76 #endif /* APMDEBUG_VALUE */
77 
78 #else
79 #define	DPRINTF(f, x)		/**/
80 #endif /* APMDEBUG */
81 
82 #define	SCFLAG_OREAD	0x0000001
83 #define	SCFLAG_OWRITE	0x0000002
84 #define	SCFLAG_OPEN	(SCFLAG_OREAD|SCFLAG_OWRITE)
85 
86 #define	APMUNIT(dev)	(minor(dev)&0xf0)
87 #define	APM(dev)	(minor(dev)&0x0f)
88 #define APM_NORMAL	0
89 #define APM_CTL	8
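/*
 * Minor number decoding, per the macros above: APM() takes the low
 * nibble as the flavour of device and APMUNIT() takes the remaining
 * bits as the unit, so e.g. minor 0 opens unit 0 as the normal,
 * read-only event device (APM_NORMAL) and minor 8 opens unit 0 as
 * the control device (APM_CTL).  Which device nodes carry these
 * minors is platform policy and is not assumed here.
 */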
90 
91 /*
92  * A brief note on the locking protocol: it's very simple; we
93  * assert an exclusive lock any time thread context enters the
94  * APM module.  This covers both the APM thread itself and
95  * user context.
96  */
97 #define	APM_LOCK(apmsc)						\
98 	(void) mutex_enter(&(apmsc)->sc_lock)
99 #define	APM_UNLOCK(apmsc)						\
100 	(void) mutex_exit(&(apmsc)->sc_lock)
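/*
 * A minimal sketch of the pattern, as used by apm_thread() and the
 * character-device entry points below:
 *
 *	APM_LOCK(sc);
 *	... examine or update sc->sc_flags, the event ring, etc. ...
 *	APM_UNLOCK(sc);
 */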
101 
102 static void	apmdevattach(device_t, device_t, void *);
103 static int	apmdevmatch(device_t, cfdata_t, void *);
104 
105 static void	apm_event_handle(struct apm_softc *, u_int, u_int);
106 static void	apm_periodic_check(struct apm_softc *);
107 static void	apm_thread(void *);
108 static void	apm_perror(const char *, int, ...)
109 		    __attribute__((__format__(__printf__,1,3)));
110 #ifdef APM_POWER_PRINT
111 static void	apm_power_print(struct apm_softc *, struct apm_power_info *);
112 #endif
113 static int	apm_record_event(struct apm_softc *, u_int);
114 static void	apm_set_ver(struct apm_softc *);
115 static void	apm_standby(struct apm_softc *);
116 static void	apm_suspend(struct apm_softc *);
117 static void	apm_resume(struct apm_softc *, u_int, u_int);
118 
119 CFATTACH_DECL_NEW(apmdev, sizeof(struct apm_softc),
120     apmdevmatch, apmdevattach, NULL, NULL);
121 
122 extern struct cfdriver apmdev_cd;
123 
124 dev_type_open(apmdevopen);
125 dev_type_close(apmdevclose);
126 dev_type_ioctl(apmdevioctl);
127 dev_type_poll(apmdevpoll);
128 dev_type_kqfilter(apmdevkqfilter);
129 
130 const struct cdevsw apmdev_cdevsw = {
131 	apmdevopen, apmdevclose, noread, nowrite, apmdevioctl,
132 	nostop, notty, apmdevpoll, nommap, apmdevkqfilter, D_OTHER
133 };
134 
135 /* configurable variables */
136 int	apm_bogus_bios = 0;
137 #ifdef APM_DISABLE
138 int	apm_enabled = 0;
139 #else
140 int	apm_enabled = 1;
141 #endif
142 #ifdef APM_NO_IDLE
143 int	apm_do_idle = 0;
144 #else
145 int	apm_do_idle = 1;
146 #endif
147 #ifdef APM_NO_STANDBY
148 int	apm_do_standby = 0;
149 #else
150 int	apm_do_standby = 1;
151 #endif
152 #ifdef APM_V10_ONLY
153 int	apm_v11_enabled = 0;
154 #else
155 int	apm_v11_enabled = 1;
156 #endif
157 #ifdef APM_NO_V12
158 int	apm_v12_enabled = 0;
159 #else
160 int	apm_v12_enabled = 1;
161 #endif
162 
163 /* variables used during operation (XXX cgd) */
164 u_char	apm_majver, apm_minver;
165 int	apm_inited;
166 int	apm_standbys, apm_userstandbys, apm_suspends, apm_battlow;
167 int	apm_damn_fool_bios, apm_op_inprog;
168 int	apm_evindex;
169 
170 static int apm_spl;		/* saved spl while suspended */
171 
172 const char *
173 apm_strerror(int code)
174 {
175 	switch (code) {
176 	case APM_ERR_PM_DISABLED:
177 		return ("power management disabled");
178 	case APM_ERR_REALALREADY:
179 		return ("real mode interface already connected");
180 	case APM_ERR_NOTCONN:
181 		return ("interface not connected");
182 	case APM_ERR_16ALREADY:
183 		return ("16-bit interface already connected");
184 	case APM_ERR_16NOTSUPP:
185 		return ("16-bit interface not supported");
186 	case APM_ERR_32ALREADY:
187 		return ("32-bit interface already connected");
188 	case APM_ERR_32NOTSUPP:
189 		return ("32-bit interface not supported");
190 	case APM_ERR_UNRECOG_DEV:
191 		return ("unrecognized device ID");
192 	case APM_ERR_ERANGE:
193 		return ("parameter out of range");
194 	case APM_ERR_NOTENGAGED:
195 		return ("interface not engaged");
196 	case APM_ERR_UNABLE:
197 		return ("unable to enter requested state");
198 	case APM_ERR_NOEVENTS:
199 		return ("no pending events");
200 	case APM_ERR_NOT_PRESENT:
201 		return ("no APM present");
202 	default:
203 		return ("unknown error code");
204 	}
205 }
206 
207 static void
208 apm_perror(const char *str, int errinfo, ...) /* XXX cgd */
209 {
210 	va_list ap;
211 
212 	printf("APM ");
213 
214 	va_start(ap, errinfo);
215 	vprintf(str, ap);			/* XXX cgd */
216 	va_end(ap);
217 
218 	printf(": %s\n", apm_strerror(errinfo));
219 }
220 
221 #ifdef APM_POWER_PRINT
222 static void
223 apm_power_print(struct apm_softc *sc, struct apm_power_info *pi)
224 {
225 
226 	if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) {
227 		aprint_normal_dev(sc->sc_dev,
228 		    "battery life expectancy: %d%%\n",
229 		    pi->battery_life);
230 	}
231 	aprint_normal_dev(sc->sc_dev, "A/C state: ");
232 	switch (pi->ac_state) {
233 	case APM_AC_OFF:
234 		printf("off\n");
235 		break;
236 	case APM_AC_ON:
237 		printf("on\n");
238 		break;
239 	case APM_AC_BACKUP:
240 		printf("backup power\n");
241 		break;
242 	default:
243 	case APM_AC_UNKNOWN:
244 		printf("unknown\n");
245 		break;
246 	}
247 	aprint_normal_dev(sc->sc_dev, "battery charge state:");
248 	if (apm_minver == 0)
249 		switch (pi->battery_state) {
250 		case APM_BATT_HIGH:
251 			printf("high\n");
252 			break;
253 		case APM_BATT_LOW:
254 			printf("low\n");
255 			break;
256 		case APM_BATT_CRITICAL:
257 			printf("critical\n");
258 			break;
259 		case APM_BATT_CHARGING:
260 			printf("charging\n");
261 			break;
262 		case APM_BATT_UNKNOWN:
263 			printf("unknown\n");
264 			break;
265 		default:
266 			printf("undecoded state %x\n", pi->battery_state);
267 			break;
268 		}
269 	else if (apm_minver >= 1) {
270 		if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
271 			printf(" no battery");
272 		else {
273 			if (pi->battery_flags & APM_BATT_FLAG_HIGH)
274 				printf(" high");
275 			if (pi->battery_flags & APM_BATT_FLAG_LOW)
276 				printf(" low");
277 			if (pi->battery_flags & APM_BATT_FLAG_CRITICAL)
278 				printf(" critical");
279 			if (pi->battery_flags & APM_BATT_FLAG_CHARGING)
280 				printf(" charging");
281 		}
282 		printf("\n");
283 		if (pi->minutes_valid) {
284 			aprint_normal_dev(sc->sc_dev, "estimated ");
285 			if (pi->minutes_left / 60)
286 				printf("%dh ", pi->minutes_left / 60);
287 			printf("%dm\n", pi->minutes_left % 60);
288 		}
289 	}
290 	return;
291 }
292 #endif
293 
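/*
 * Suspend/standby ordering (apm_suspend() and apm_standby() below):
 * run the "soft" power hooks while we may still sleep, wait about
 * hz/2 ticks for things to settle, raise the spl, run the remaining
 * power hooks, and only then ask the back-end via aa_set_powstate()
 * to put the whole system into the requested state.  If that last
 * call fails, we resume immediately.
 */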
294 static void
295 apm_suspend(struct apm_softc *sc)
296 {
297 	int error;
298 
299 	if (sc->sc_power_state == PWR_SUSPEND) {
300 #ifdef APMDEBUG
301 		aprint_debug_dev(sc->sc_dev,
302 		    "apm_suspend: already suspended?\n");
303 #endif
304 		return;
305 	}
306 	sc->sc_power_state = PWR_SUSPEND;
307 
308 	dopowerhooks(PWR_SOFTSUSPEND);
309 	(void) tsleep(sc, PWAIT, "apmsuspend",  hz/2);
310 
311 	apm_spl = splhigh();
312 
313 	dopowerhooks(PWR_SUSPEND);
314 
315 	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
316 	    APM_SYS_SUSPEND);
317 
318 	if (error)
319 		apm_resume(sc, 0, 0);
320 }
321 
322 static void
323 apm_standby(struct apm_softc *sc)
324 {
325 	int error;
326 
327 	if (sc->sc_power_state == PWR_STANDBY) {
328 #ifdef APMDEBUG
329 		aprint_debug_dev(sc->sc_dev,
330 		    "apm_standby: already standing by?\n");
331 #endif
332 		return;
333 	}
334 	sc->sc_power_state = PWR_STANDBY;
335 
336 	dopowerhooks(PWR_SOFTSTANDBY);
337 	(void) tsleep(sc, PWAIT, "apmstandby",  hz/2);
338 
339 	apm_spl = splhigh();
340 
341 	dopowerhooks(PWR_STANDBY);
342 
343 	error = (*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
344 	    APM_SYS_STANDBY);
345 	if (error)
346 		apm_resume(sc, 0, 0);
347 }
348 
349 static void
350 apm_resume(struct apm_softc *sc, u_int event_type, u_int event_info)
351 {
352 
353 	if (sc->sc_power_state == PWR_RESUME) {
354 #ifdef APMDEBUG
355 		aprint_debug_dev(sc->sc_dev, "apm_resume: already running?\n");
356 #endif
357 		return;
358 	}
359 	sc->sc_power_state = PWR_RESUME;
360 
361 #if 0 /* XXX: def TIME_FREQ */
362 	/*
363 	 * Some systems require their clock to be initialized after hibernation.
364 	 */
365 	initrtclock(TIMER_FREQ);
366 #endif
367 
368 	inittodr(time_second);
369 	dopowerhooks(PWR_RESUME);
370 
371 	splx(apm_spl);
372 
373 	dopowerhooks(PWR_SOFTRESUME);
374 
375 	apm_record_event(sc, event_type);
376 }
377 
378 /*
379  * return 0 if the user will notice and handle the event,
380  * return 1 if the kernel driver should do so.
381  */
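/*
 * sc_event_list is used as a fixed-size ring: sc_event_ptr is the
 * next write slot and sc_event_count the number of unread entries,
 * so the oldest unread event lives at
 * (sc_event_ptr + APM_NEVENTS - sc_event_count) % APM_NEVENTS,
 * which is how apmdevioctl(APM_IOC_NEXTEVENT) finds it.  E.g. with
 * sc_event_ptr == 3 and sc_event_count == 2 the reader starts at
 * slot 1, whatever APM_NEVENTS is.
 */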
382 static int
383 apm_record_event(struct apm_softc *sc, u_int event_type)
384 {
385 	struct apm_event_info *evp;
386 
387 	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
388 		return 1;		/* no user waiting */
389 	if (sc->sc_event_count == APM_NEVENTS)
390 		return 1;			/* overflow */
391 	evp = &sc->sc_event_list[sc->sc_event_ptr];
392 	sc->sc_event_count++;
393 	sc->sc_event_ptr++;
394 	sc->sc_event_ptr %= APM_NEVENTS;
395 	evp->type = event_type;
396 	evp->index = ++apm_evindex;
397 	selnotify(&sc->sc_rsel, 0, 0);
398 	return (sc->sc_flags & SCFLAG_OWRITE) ? 0 : 1; /* user may handle */
399 }
400 
401 static void
402 apm_event_handle(struct apm_softc *sc, u_int event_code, u_int event_info)
403 {
404 	int error;
405 	const char *code;
406 	struct apm_power_info pi;
407 
408 	switch (event_code) {
409 	case APM_USER_STANDBY_REQ:
410 		DPRINTF(APMDEBUG_EVENTS, ("apmev: user standby request\n"));
411 		if (apm_do_standby) {
412 			if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
413 				apm_userstandbys++;
414 			apm_op_inprog++;
415 			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
416 			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
417 		} else {
418 			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
419 			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
420 			/* in case BIOS hates being spurned */
421 			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
422 		}
423 		break;
424 
425 	case APM_STANDBY_REQ:
426 		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby request\n"));
427 		if (apm_standbys || apm_suspends) {
428 			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
429 			    ("damn fool BIOS did not wait for answer\n"));
430 			/* just give up the fight */
431 			apm_damn_fool_bios = 1;
432 		}
433 		if (apm_do_standby) {
434 			if (apm_op_inprog == 0 &&
435 			    apm_record_event(sc, event_code))
436 				apm_standbys++;
437 			apm_op_inprog++;
438 			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
439 			    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
440 		} else {
441 			(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
442 			    APM_DEV_ALLDEVS, APM_LASTREQ_REJECTED);
443 			/* in case BIOS hates being spurned */
444 			(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
445 		}
446 		break;
447 
448 	case APM_USER_SUSPEND_REQ:
449 		DPRINTF(APMDEBUG_EVENTS, ("apmev: user suspend request\n"));
450 		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
451 			apm_suspends++;
452 		apm_op_inprog++;
453 		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
454 		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
455 		break;
456 
457 	case APM_SUSPEND_REQ:
458 		DPRINTF(APMDEBUG_EVENTS, ("apmev: system suspend request\n"));
459 		if (apm_standbys || apm_suspends) {
460 			DPRINTF(APMDEBUG_EVENTS | APMDEBUG_ANOM,
461 			    ("damn fool BIOS did not wait for answer\n"));
462 			/* just give up the fight */
463 			apm_damn_fool_bios = 1;
464 		}
465 		if (apm_op_inprog == 0 && apm_record_event(sc, event_code))
466 			apm_suspends++;
467 		apm_op_inprog++;
468 		(void)(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie,
469 		    APM_DEV_ALLDEVS, APM_LASTREQ_INPROG);
470 		break;
471 
472 	case APM_POWER_CHANGE:
473 		DPRINTF(APMDEBUG_EVENTS, ("apmev: power status change\n"));
474 		error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
475 #ifdef APM_POWER_PRINT
476 		/* only print if nobody is catching events. */
477 		if (error == 0 &&
478 		    (sc->sc_flags & (SCFLAG_OREAD|SCFLAG_OWRITE)) == 0)
479 			apm_power_print(sc, &pi);
480 #endif
481 		apm_record_event(sc, event_code);
482 		break;
483 
484 	case APM_NORMAL_RESUME:
485 		DPRINTF(APMDEBUG_EVENTS, ("apmev: resume system\n"));
486 		apm_resume(sc, event_code, event_info);
487 		break;
488 
489 	case APM_CRIT_RESUME:
490 		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical resume system"));
491 		apm_resume(sc, event_code, event_info);
492 		break;
493 
494 	case APM_SYS_STANDBY_RESUME:
495 		DPRINTF(APMDEBUG_EVENTS, ("apmev: system standby resume\n"));
496 		apm_resume(sc, event_code, event_info);
497 		break;
498 
499 	case APM_UPDATE_TIME:
500 		DPRINTF(APMDEBUG_EVENTS, ("apmev: update time\n"));
501 		apm_resume(sc, event_code, event_info);
502 		break;
503 
504 	case APM_CRIT_SUSPEND_REQ:
505 		DPRINTF(APMDEBUG_EVENTS, ("apmev: critical system suspend\n"));
506 		apm_record_event(sc, event_code);
507 		apm_suspend(sc);
508 		break;
509 
510 	case APM_BATTERY_LOW:
511 		DPRINTF(APMDEBUG_EVENTS, ("apmev: battery low\n"));
512 		apm_battlow++;
513 		apm_record_event(sc, event_code);
514 		break;
515 
516 	case APM_CAP_CHANGE:
517 		DPRINTF(APMDEBUG_EVENTS, ("apmev: capability change\n"));
518 		if (apm_minver < 2) {
519 			DPRINTF(APMDEBUG_EVENTS, ("apm: unexpected event\n"));
520 		} else {
521 			u_int numbatts, capflags;
522 			(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
523 			    &numbatts, &capflags);
524 			(*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pi);
525 		}
526 		break;
527 
528 	default:
529 		switch (event_code >> 8) {
530 			case 0:
531 				code = "reserved system";
532 				break;
533 			case 1:
534 				code = "reserved device";
535 				break;
536 			case 2:
537 				code = "OEM defined";
538 				break;
539 			default:
540 				code = "reserved";
541 				break;
542 		}
543 		printf("APM: %s event code %x\n", code, event_code);
544 	}
545 }
546 
547 static void
548 apm_periodic_check(struct apm_softc *sc)
549 {
550 	int error;
551 	u_int event_code, event_info;
552 
553 
554 	/*
555 	 * tell the BIOS we're working on it, if asked to do a
556 	 * suspend/standby
557 	 */
558 	if (apm_op_inprog)
559 		(*sc->sc_ops->aa_set_powstate)(sc->sc_cookie, APM_DEV_ALLDEVS,
560 		    APM_LASTREQ_INPROG);
561 
562 	while ((error = (*sc->sc_ops->aa_get_event)(sc->sc_cookie, &event_code,
563 	    &event_info)) == 0 && !apm_damn_fool_bios)
564 		apm_event_handle(sc, event_code, event_info);
565 
566 	if (error != APM_ERR_NOEVENTS)
567 		apm_perror("get event", error);
568 	if (apm_suspends) {
569 		apm_op_inprog = 0;
570 		apm_suspend(sc);
571 	} else if (apm_standbys || apm_userstandbys) {
572 		apm_op_inprog = 0;
573 		apm_standby(sc);
574 	}
575 	apm_suspends = apm_standbys = apm_battlow = apm_userstandbys = 0;
576 	apm_damn_fool_bios = 0;
577 }
578 
579 static void
580 apm_set_ver(struct apm_softc *sc)
581 {
582 
583 	if (apm_v12_enabled &&
584 	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
585 	    APM_MINOR_VERS(sc->sc_vers) == 2) {
586 		apm_majver = 1;
587 		apm_minver = 2;
588 		goto ok;
589 	}
590 
591 	if (apm_v11_enabled &&
592 	    APM_MAJOR_VERS(sc->sc_vers) == 1 &&
593 	    APM_MINOR_VERS(sc->sc_vers) == 1) {
594 		apm_majver = 1;
595 		apm_minver = 1;
596 	} else {
597 		apm_majver = 1;
598 		apm_minver = 0;
599 	}
600 ok:
601 	aprint_normal("Power Management spec V%d.%d", apm_majver, apm_minver);
602 	apm_inited = 1;
603 	if (sc->sc_detail & APM_IDLE_SLOWS) {
604 #ifdef DIAGNOSTIC
605 		/* not often relevant */
606 		aprint_normal(" (slowidle)");
607 #endif
608 		/* leave apm_do_idle at its user-configured setting */
609 	} else
610 		apm_do_idle = 0;
611 #ifdef DIAGNOSTIC
612 	if (sc->sc_detail & APM_BIOS_PM_DISABLED)
613 		aprint_normal(" (BIOS mgmt disabled)");
614 	if (sc->sc_detail & APM_BIOS_PM_DISENGAGED)
615 		aprint_normal(" (BIOS managing devices)");
616 #endif
617 }
618 
619 static int
620 apmdevmatch(device_t parent, cfdata_t match, void *aux)
621 {
622 
623 	return apm_match();
624 }
625 
626 static void
627 apmdevattach(device_t parent, device_t self, void *aux)
628 {
629 	struct apm_softc *sc;
630 	struct apmdev_attach_args *aaa = aux;
631 
632 	sc = device_private(self);
633 	sc->sc_dev = self;
634 
635 	sc->sc_detail = aaa->apm_detail;
636 	sc->sc_vers = aaa->apm_detail & 0xffff; /* XXX: magic */
637 
638 	sc->sc_ops = aaa->accessops;
639 	sc->sc_cookie = aaa->accesscookie;
640 
641 	apm_attach(sc);
642 }
643 
644 /*
645  * Print function (for parent devices).
646  */
647 int
648 apmprint(void *aux, const char *pnp)
649 {
650 	if (pnp)
651 		aprint_normal("apm at %s", pnp);
652 
653 	return (UNCONF);
654 }
655 
656 int
657 apm_match(void)
658 {
659 	static int got;
660 	return !got++;
661 }
662 
663 void
664 apm_attach(struct apm_softc *sc)
665 {
666 	struct apm_power_info pinfo;
667 	u_int numbatts, capflags;
668 	int error;
669 
670 	aprint_naive("\n");
671 	aprint_normal(": ");
672 
673 	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
674 	case 0x0100:
675 		apm_v11_enabled = 0;
676 		apm_v12_enabled = 0;
677 		break;
678 	case 0x0101:
679 		apm_v12_enabled = 0;
680 		/* fall through */
681 	case 0x0102:
682 	default:
683 		break;
684 	}
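	/*
	 * Example of the masking above: a back-end reporting APM 1.1
	 * (sc_vers == 0x0101) clears apm_v12_enabled but leaves
	 * apm_v11_enabled alone, so apm_set_ver() below settles on
	 * 1.1 at most; a 1.0 back-end clears both and forces 1.0.
	 */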
685 
686 	apm_set_ver(sc);	/* prints version info */
687 	aprint_normal("\n");
688 	if (apm_minver >= 2)
689 		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
690 		    &capflags);
691 
692 	/*
693 	 * enable power management
694 	 */
695 	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);
696 
697 	error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0, &pinfo);
698 	if (error == 0) {
699 #ifdef APM_POWER_PRINT
700 		apm_power_print(sc, &pinfo);
701 #endif
702 	} else
703 		apm_perror("get power status", error);
704 
705 	if (sc->sc_ops->aa_cpu_busy)
706 		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);
707 
708 	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
709 
710 	/* Initial state is `resumed'. */
711 	sc->sc_power_state = PWR_RESUME;
712 	selinit(&sc->sc_rsel);
713 	selinit(&sc->sc_xsel);
714 
715 	/* Do an initial check. */
716 	apm_periodic_check(sc);
717 
718 	/*
719 	 * Create a kernel thread to periodically check for APM events,
720 	 * and notify other subsystems when they occur.
721 	 */
722 	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
723 	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
724 		/*
725 		 * We were unable to create the APM thread; bail out.
726 		 */
727 		if (sc->sc_ops->aa_disconnect)
728 			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
729 		aprint_error_dev(sc->sc_dev, "unable to create thread, "
730 		    "kernel APM support disabled\n");
731 	}
732 }
733 
734 void
735 apm_thread(void *arg)
736 {
737 	struct apm_softc *apmsc = arg;
738 
739 	/*
740 	 * Loop forever, doing a periodic check for APM events.
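	 * The tsleep() below wakes us up every (8 * hz) / 7 ticks,
	 * i.e. roughly every 1.1 seconds.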
741 	 */
742 	for (;;) {
743 		APM_LOCK(apmsc);
744 		apm_periodic_check(apmsc);
745 		APM_UNLOCK(apmsc);
746 		(void) tsleep(apmsc, PWAIT, "apmev",  (8 * hz) / 7);
747 	}
748 }
749 
750 int
751 apmdevopen(dev_t dev, int flag, int mode, struct lwp *l)
752 {
753 	int ctl = APM(dev);
754 	int error = 0;
755 	struct apm_softc *sc;
756 
757 	sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
758 	if (!sc)
759 		return ENXIO;
760 
761 	if (!apm_inited)
762 		return ENXIO;
763 
764 	DPRINTF(APMDEBUG_DEVICE,
765 	    ("apmopen: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));
766 
767 	APM_LOCK(sc);
768 	switch (ctl) {
769 	case APM_CTL:
770 		if (!(flag & FWRITE)) {
771 			error = EINVAL;
772 			break;
773 		}
774 		if (sc->sc_flags & SCFLAG_OWRITE) {
775 			error = EBUSY;
776 			break;
777 		}
778 		sc->sc_flags |= SCFLAG_OWRITE;
779 		break;
780 	case APM_NORMAL:
781 		if (!(flag & FREAD) || (flag & FWRITE)) {
782 			error = EINVAL;
783 			break;
784 		}
785 		sc->sc_flags |= SCFLAG_OREAD;
786 		break;
787 	default:
788 		error = ENXIO;
789 		break;
790 	}
791 	APM_UNLOCK(sc);
792 
793 	return (error);
794 }
795 
796 int
797 apmdevclose(dev_t dev, int flag, int mode,
798 	    struct lwp *l)
799 {
800 	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
801 	int ctl = APM(dev);
802 
803 	DPRINTF(APMDEBUG_DEVICE,
804 	    ("apmclose: pid %d flag %x mode %x\n", l->l_proc->p_pid, flag, mode));
805 
806 	APM_LOCK(sc);
807 	switch (ctl) {
808 	case APM_CTL:
809 		sc->sc_flags &= ~SCFLAG_OWRITE;
810 		break;
811 	case APM_NORMAL:
812 		sc->sc_flags &= ~SCFLAG_OREAD;
813 		break;
814 	}
815 	if ((sc->sc_flags & SCFLAG_OPEN) == 0) {
816 		sc->sc_event_count = 0;
817 		sc->sc_event_ptr = 0;
818 	}
819 	APM_UNLOCK(sc);
820 	return 0;
821 }
822 
823 int
824 apmdevioctl(dev_t dev, u_long cmd, void *data, int flag,
825 	    struct lwp *l)
826 {
827 	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
828 	struct apm_power_info *powerp;
829 	struct apm_event_info *evp;
830 #if 0
831 	struct apm_ctl *actl;
832 #endif
833 	int i, error = 0;
834 	int batt_flags;
835 
836 	APM_LOCK(sc);
837 	switch (cmd) {
838 	case APM_IOC_STANDBY:
839 		if (!apm_do_standby) {
840 			error = EOPNOTSUPP;
841 			break;
842 		}
843 
844 		if ((flag & FWRITE) == 0) {
845 			error = EBADF;
846 			break;
847 		}
848 		apm_userstandbys++;
849 		break;
850 
851 	case APM_IOC_SUSPEND:
852 		if ((flag & FWRITE) == 0) {
853 			error = EBADF;
854 			break;
855 		}
856 		apm_suspends++;
857 		break;
858 
859 	case APM_IOC_NEXTEVENT:
860 		if (!sc->sc_event_count)
861 			error = EAGAIN;
862 		else {
863 			evp = (struct apm_event_info *)data;
864 			i = sc->sc_event_ptr + APM_NEVENTS - sc->sc_event_count;
865 			i %= APM_NEVENTS;
866 			*evp = sc->sc_event_list[i];
867 			sc->sc_event_count--;
868 		}
869 		break;
870 
871 	case OAPM_IOC_GETPOWER:
872 	case APM_IOC_GETPOWER:
873 		powerp = (struct apm_power_info *)data;
874 		if ((error = (*sc->sc_ops->aa_get_powstat)(sc->sc_cookie, 0,
875 		    powerp)) != 0) {
876 			apm_perror("ioctl get power status", error);
877 			error = EIO;
878 			break;
879 		}
880 		switch (apm_minver) {
881 		case 0:
882 			break;
883 		case 1:
884 		default:
885 			batt_flags = powerp->battery_flags;
886 			powerp->battery_state = APM_BATT_UNKNOWN;
887 			if (batt_flags & APM_BATT_FLAG_HIGH)
888 				powerp->battery_state = APM_BATT_HIGH;
889 			else if (batt_flags & APM_BATT_FLAG_LOW)
890 				powerp->battery_state = APM_BATT_LOW;
891 			else if (batt_flags & APM_BATT_FLAG_CRITICAL)
892 				powerp->battery_state = APM_BATT_CRITICAL;
893 			else if (batt_flags & APM_BATT_FLAG_CHARGING)
894 				powerp->battery_state = APM_BATT_CHARGING;
895 			else if (batt_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY)
896 				powerp->battery_state = APM_BATT_ABSENT;
897 			break;
898 		}
899 		break;
900 
901 	default:
902 		error = ENOTTY;
903 	}
904 	APM_UNLOCK(sc);
905 
906 	return (error);
907 }
908 
909 int
910 apmdevpoll(dev_t dev, int events, struct lwp *l)
911 {
912 	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
913 	int revents = 0;
914 
915 	APM_LOCK(sc);
916 	if (events & (POLLIN | POLLRDNORM)) {
917 		if (sc->sc_event_count)
918 			revents |= events & (POLLIN | POLLRDNORM);
919 		else
920 			selrecord(l, &sc->sc_rsel);
921 	}
922 	APM_UNLOCK(sc);
923 
924 	return (revents);
925 }
926 
927 static void
928 filt_apmrdetach(struct knote *kn)
929 {
930 	struct apm_softc *sc = kn->kn_hook;
931 
932 	APM_LOCK(sc);
933 	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
934 	APM_UNLOCK(sc);
935 }
936 
937 static int
938 filt_apmread(struct knote *kn, long hint)
939 {
940 	struct apm_softc *sc = kn->kn_hook;
941 
942 	kn->kn_data = sc->sc_event_count;
943 	return (kn->kn_data > 0);
944 }
945 
946 static const struct filterops apmread_filtops =
947 	{ 1, NULL, filt_apmrdetach, filt_apmread };
948 
949 int
950 apmdevkqfilter(dev_t dev, struct knote *kn)
951 {
952 	struct apm_softc *sc = device_lookup_private(&apmdev_cd, APMUNIT(dev));
953 	struct klist *klist;
954 
955 	switch (kn->kn_filter) {
956 	case EVFILT_READ:
957 		klist = &sc->sc_rsel.sel_klist;
958 		kn->kn_fop = &apmread_filtops;
959 		break;
960 
961 	default:
962 		return (EINVAL);
963 	}
964 
965 	kn->kn_hook = sc;
966 
967 	APM_LOCK(sc);
968 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
969 	APM_UNLOCK(sc);
970 
971 	return (0);
972 }
973